// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::script::{Script,Builder};
use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
use bitcoin::util::sighash;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use bitcoin::hashes::hex::ToHex;

#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

#[derive(Clone)]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
}

/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}

/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
/// move on to ChannelReady.
/// Note that PeerDisconnected can be set on both ChannelReady and FundingSent.
/// ChannelReady can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to ShutdownComplete, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their open_channel/accept_channel message
	TheirInitSent = 1 << 1,
	/// We have sent funding_created and are awaiting a funding_signed to advance to FundingSent.
	/// Note that this is nonsense for an inbound channel as we immediately generate funding_signed
	/// upon receipt of funding_created, so simply skip this state.
	FundingCreated = 4,
	/// Set when we have received/sent funding_created and funding_signed and are thus now waiting
	/// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 8,
	/// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
	/// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
	/// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
	OurChannelReady = 1 << 5,
	ChannelReady = 64,
	/// Flag which is set on ChannelReady and FundingSent indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a channel_reestablish
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on ChannelReady, FundingCreated, and FundingSent indicating the user has
	/// told us a ChannelMonitor update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on ChannelReady.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on ChannelReady or FundingSent after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on ChannelReady or FundingSent after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 4096,
}
const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
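// Illustrative sketch only (not part of the state machine): the flag values above compose
// bitwise with the base state. For example, an open channel whose peer has sent shutdown
// while disconnected would carry a state such as:
//   let state = ChannelState::ChannelReady as u32
//       | ChannelState::PeerDisconnected as u32
//       | ChannelState::RemoteShutdownSent as u32;
// for which both `state & BOTH_SIDES_SHUTDOWN_MASK != 0` and
// `state & MULTI_STATE_FLAGS != 0` hold.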

pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // 281_474_976_710_655; commitment numbers count down from here

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
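// As a worked example of the two constants above: a non-anchor commitment transaction
// carrying two non-dust HTLCs weighs roughly
//   commitment_tx_base_weight(false) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC
//   = 724 + 2 * 172 = 1068 weight units,
// and the commitment fee at a given `feerate_per_kw` is weight * feerate_per_kw / 1000.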

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1, i.e. 16,777,215 satoshis.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
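///
/// Roughly, the 354 sat figure falls out of Bitcoin Core's dust rules: the largest standard
/// segwit output script is 42 bytes, so the serialized output is 8 + 1 + 42 = 51 bytes,
/// spending a segwit output is estimated at 67 vbytes, and the dust relay feerate is
/// 3 sat/vbyte, giving (51 + 67) * 3 = 354 satoshis.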
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
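// Usage sketch (illustrative only): unwraps the `Ok` value, or converts any error into a
// `ChannelError::Close` carrying the given message, e.g.
//   let pk = secp_check!(PublicKey::from_slice(&their_bytes), "Invalid pubkey".to_owned());
// where `their_bytes` is a hypothetical byte slice received from our counterparty.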

/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, on either the inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}

/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch<'a> {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: &'a ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}

/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The return type of `force_shutdown`
pub(crate) type ShutdownResult = (
	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
);

/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
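// For example, at a current feerate of 1_000 sat/kW, before adding a new outbound HTLC as
// the channel initiator we check that the initiator's balance could still cover the
// commitment fee if the feerate rose to
// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * 1_000 = 2_000 sat/kW.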

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
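// Concretely, this buffer means our update_fee roughly budgets for the commitment transaction
// as if CONCURRENT_INBOUND_HTLC_FEE_BUFFER (i.e. 2) additional non-dust HTLCs were already
// present: an extra 2 * COMMITMENT_TX_WEIGHT_PER_HTLC = 344 weight units at the new feerate.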

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update, as measured in <https://arxiv.org/pdf/2205.12737.pdf>.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
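///   * i.e., `EXPIRE_PREV_CONFIG_TICKS` = 300 seconds / 60 seconds per tick = 5.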
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
	/// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
	/// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
	/// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	blocked: bool,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
	(2, blocked, required),
});

/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<Signer: ChannelSigner> {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	channel_id: [u8; 32],
	temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// many tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: Signer,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// Flag that ensures that `accept_inbound_channel` must be called before `funding_created`
	/// is executed successfully. The reason for this flag is that when the
	/// `UserConfig::manually_accept_inbound_channels` config flag is set to true, inbound channels
	/// are required to be manually accepted by the node operator before the `msgs::AcceptChannel`
	/// message is created and sent out. During the manual accept process, `accept_inbound_channel`
	/// is called by `ChannelManager::accept_inbound_channel`.
	///
	/// The flag counteracts that a counterparty node could theoretically send a
	/// `msgs::FundingCreated` message before the node operator has manually accepted an inbound
	/// channel request made by the counterparty node. That would execute `funding_created` before
	/// `accept_inbound_channel`, and `funding_created` should therefore not execute successfully.
	inbound_awaiting_accept: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
	/// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
	/// completes we still need to be able to complete the persistence. Thus, we have to keep a
	/// copy of the [`ChannelMonitorUpdate`] here until it is complete.
	pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}

impl<Signer: ChannelSigner> ChannelContext<Signer> {
	pub(crate) fn opt_anchors(&self) -> bool {
		self.channel_transaction_parameters.opt_anchors.is_some()
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

895         pub fn have_received_message(&self) -> bool {
896                 self.channel_state > (ChannelState::OurInitSent as u32)
897         }
898
899         /// Returns true if this channel is fully established and not known to be closing.
900         /// Allowed in any state (including after shutdown)
901         pub fn is_usable(&self) -> bool {
902                 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
903                 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
904         }
905
906         /// Returns true if this channel is currently available for use. This is a superset of
907         /// is_usable() and considers things like the channel being temporarily disabled.
908         /// Allowed in any state (including after shutdown)
909         pub fn is_live(&self) -> bool {
910                 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
911         }
912
913         // Public utilities:
914
915         pub fn channel_id(&self) -> [u8; 32] {
916                 self.channel_id
917         }
918
919         // Return the `temporary_channel_id` used during channel establishment.
920         //
921         // Will return `None` for channels created prior to LDK version 0.0.115.
922         pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
923                 self.temporary_channel_id
924         }
925
926         pub fn minimum_depth(&self) -> Option<u32> {
927                 self.minimum_depth
928         }
929
930         /// Gets the "user_id" value passed into the construction of this channel. It has no special
931         /// meaning and exists only to allow users to have a persistent identifier of a channel.
932         pub fn get_user_id(&self) -> u128 {
933                 self.user_id
934         }
935
936         /// Gets the channel's type
937         pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
938                 &self.channel_type
939         }
940
941         /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
942         /// is_usable() returns true).
943         /// Allowed in any state (including after shutdown)
944         pub fn get_short_channel_id(&self) -> Option<u64> {
945                 self.short_channel_id
946         }
947
948         /// Allowed in any state (including after shutdown)
949         pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
950                 self.latest_inbound_scid_alias
951         }
952
953         /// Allowed in any state (including after shutdown)
954         pub fn outbound_scid_alias(&self) -> u64 {
955                 self.outbound_scid_alias
956         }
957
958         /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
959         /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
960         pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
961                 assert_eq!(self.outbound_scid_alias, 0);
962                 self.outbound_scid_alias = outbound_scid_alias;
963         }
964
965         /// Returns the funding_txo we either got from our peer, or were given by
966         /// get_outbound_funding_created.
967         pub fn get_funding_txo(&self) -> Option<OutPoint> {
968                 self.channel_transaction_parameters.funding_outpoint
969         }
970
971         /// Returns the block hash in which our funding transaction was confirmed.
972         pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
973                 self.funding_tx_confirmed_in
974         }
975
	/// Returns the current number of confirmations on the funding transaction.
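	/// For example, if the funding transaction confirmed at height 100 and `height` is 105,
	/// this returns 6; it returns 0 if the funding transaction is unconfirmed.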
	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
		if self.funding_tx_confirmation_height == 0 {
			// We either haven't seen any confirmation yet, or observed a reorg.
			return 0;
		}

		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	}

	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_announced_htlc_max_msat(&self) -> u64 {
1020                 return cmp::min(
1021                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1022                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1023                         // channel might have been used to route very small values (either by honest users or as DoS).
1024                         self.channel_value_satoshis * 1000 * 9 / 10,
1025
1026                         self.counterparty_max_htlc_value_in_flight_msat
1027                 );
1028         }
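
        // A minimal sketch (illustrative only) of the announced-maximum rule above:
        // advertise at most 90% of channel capacity, further capped by the
        // counterparty's in-flight limit. E.g. a 1_000_000 sat channel with an
        // 800_000_000 msat counterparty cap advertises 800_000_000 msat, since
        // 90% of capacity would be 900_000_000 msat.
        #[cfg(test)]
        fn example_announced_htlc_max_msat(channel_value_satoshis: u64, counterparty_max_htlc_value_in_flight_msat: u64) -> u64 {
                cmp::min(channel_value_satoshis * 1000 * 9 / 10, counterparty_max_htlc_value_in_flight_msat)
        }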
1029
1030         /// Allowed in any state (including after shutdown)
1031         pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1032                 self.counterparty_htlc_minimum_msat
1033         }
1034
1035         /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1036         pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1037                 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1038         }
1039
1040         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1041                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1042                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1043                         cmp::min(
1044                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1045                                 party_max_htlc_value_in_flight_msat
1046                         )
1047                 })
1048         }
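
        // Illustrative sketch (not LDK API) of the cap computed above: both sides'
        // reserves are unavailable for HTLCs, so a 100_000 sat channel with 1_000 sat
        // reserves on each side can carry at most 98_000_000 msat, unless the
        // relevant party's max-in-flight limit is lower.
        #[cfg(test)]
        fn example_htlc_maximum_msat(channel_value_satoshis: u64, counterparty_reserve_sat: u64,
                holder_reserve_sat: u64, party_max_htlc_value_in_flight_msat: u64) -> u64
        {
                cmp::min((channel_value_satoshis - counterparty_reserve_sat - holder_reserve_sat) * 1000,
                        party_max_htlc_value_in_flight_msat)
        }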
1049
1050         pub fn get_value_satoshis(&self) -> u64 {
1051                 self.channel_value_satoshis
1052         }
1053
1054         pub fn get_fee_proportional_millionths(&self) -> u32 {
1055                 self.config.options.forwarding_fee_proportional_millionths
1056         }
1057
1058         pub fn get_cltv_expiry_delta(&self) -> u16 {
1059                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1060         }
1061
1062         pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
1063                 self.config.options.max_dust_htlc_exposure_msat
1064         }
1065
1066         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1067         pub fn prev_config(&self) -> Option<ChannelConfig> {
1068                 self.prev_config.map(|prev_config| prev_config.0)
1069         }
1070
1071         // Checks whether we should emit a `ChannelPending` event.
1072         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1073                 self.is_funding_initiated() && !self.channel_pending_event_emitted
1074         }
1075
1076         // Returns whether we already emitted a `ChannelPending` event.
1077         pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1078                 self.channel_pending_event_emitted
1079         }
1080
1081         // Remembers that we already emitted a `ChannelPending` event.
1082         pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1083                 self.channel_pending_event_emitted = true;
1084         }
1085
1086         // Checks whether we should emit a `ChannelReady` event.
1087         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1088                 self.is_usable() && !self.channel_ready_event_emitted
1089         }
1090
1091         // Remembers that we already emitted a `ChannelReady` event.
1092         pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1093                 self.channel_ready_event_emitted = true;
1094         }
1095
1096         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1097         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1098         /// no longer be considered when forwarding HTLCs.
1099         pub fn maybe_expire_prev_config(&mut self) {
1100                 if self.prev_config.is_none() {
1101                         return;
1102                 }
1103                 let prev_config = self.prev_config.as_mut().unwrap();
1104                 prev_config.1 += 1;
1105                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1106                         self.prev_config = None;
1107                 }
1108         }
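
        // A self-contained sketch (not LDK API) of the tick-expiry pattern above,
        // written against a bare Option; `prev` stands in for `self.prev_config`,
        // assuming the (ChannelConfig, usize) tuple shape used there.
        #[cfg(test)]
        fn example_expire_prev_config(prev: &mut Option<(ChannelConfig, usize)>) {
                if prev.is_none() {
                        return;
                }
                let entry = prev.as_mut().unwrap();
                // Count one more timer tick against the expiring config...
                entry.1 += 1;
                // ...and drop the snapshot once it has aged out.
                if entry.1 == EXPIRE_PREV_CONFIG_TICKS {
                        *prev = None;
                }
        }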
1109
1110         /// Returns the current [`ChannelConfig`] applied to the channel.
1111         pub fn config(&self) -> ChannelConfig {
1112                 self.config.options
1113         }
1114
1115         /// Updates the channel's config. Returns a bool indicating whether applying the config
1116         /// update resulted in a new ChannelUpdate message.
1117         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1118                 let did_channel_update =
1119                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1120                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1121                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1122                 if did_channel_update {
1123                         self.prev_config = Some((self.config.options, 0));
1124                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1125                         // policy change to propagate throughout the network.
1126                         self.update_time_counter += 1;
1127                 }
1128                 self.config.options = *config;
1129                 did_channel_update
1130         }
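
        // Usage sketch with hypothetical values: only the three forwarding-related
        // fields trigger a new ChannelUpdate. Changing, say, only
        // max_dust_htlc_exposure_msat still stores the new options but returns false.
        //
        //     let mut new_config = channel.config();
        //     new_config.forwarding_fee_base_msat += 100;
        //     assert!(channel.update_config(&new_config)); // relay policy changed
        //     assert!(channel.prev_config().is_some());    // old policy kept until it expires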
1131
1132         /// Returns true if funding_created was sent/received.
1133         pub fn is_funding_initiated(&self) -> bool {
1134                 self.channel_state >= ChannelState::FundingSent as u32
1135         }
1136
1137         /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1138         /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1139         /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1140         /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1141         /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1142         /// an HTLC to a).
1143         /// @local is used only to convert relevant internal structures which refer to remote vs local
1144         /// to decide value of outputs and direction of HTLCs.
1145         /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1146         /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1147         /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1148         /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1149         /// which peer generated this transaction and "to whom" this transaction flows.
1150         #[inline]
1151         fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1152                 where L::Target: Logger
1153         {
1154                 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1155                 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1156                 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1157
1158                 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1159                 let mut remote_htlc_total_msat = 0;
1160                 let mut local_htlc_total_msat = 0;
1161                 let mut value_to_self_msat_offset = 0;
1162
1163                 let mut feerate_per_kw = self.feerate_per_kw;
1164                 if let Some((feerate, update_state)) = self.pending_update_fee {
1165                         if match update_state {
1166                                 // Note that these match the inclusion criteria when scanning
1167                                 // pending_inbound_htlcs below.
1168                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1169                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1170                                 FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
1171                         } {
1172                                 feerate_per_kw = feerate;
1173                         }
1174                 }
1175
1176                 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1177                         commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1178                         get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1179                         log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1180
1181                 macro_rules! get_htlc_in_commitment {
1182                         ($htlc: expr, $offered: expr) => {
1183                                 HTLCOutputInCommitment {
1184                                         offered: $offered,
1185                                         amount_msat: $htlc.amount_msat,
1186                                         cltv_expiry: $htlc.cltv_expiry,
1187                                         payment_hash: $htlc.payment_hash,
1188                                         transaction_output_index: None
1189                                 }
1190                         }
1191                 }
1192
1193                 macro_rules! add_htlc_output {
1194                         ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1195                                 if $outbound == local { // "offered HTLC output"
1196                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1197                                         let htlc_tx_fee = if self.opt_anchors() {
1198                                                 0
1199                                         } else {
1200                                                 feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000
1201                                         };
1202                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1203                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1204                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
1205                                         } else {
1206                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1207                                                 included_dust_htlcs.push((htlc_in_tx, $source));
1208                                         }
1209                                 } else {
1210                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1211                                         let htlc_tx_fee = if self.opt_anchors() {
1212                                                 0
1213                                         } else {
1214                                                 feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000
1215                                         };
1216                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1217                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1218                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
1219                                         } else {
1220                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1221                                                 included_dust_htlcs.push((htlc_in_tx, $source));
1222                                         }
1223                                 }
1224                         }
1225                 }
1226
1227                 for ref htlc in self.pending_inbound_htlcs.iter() {
1228                         let (include, state_name) = match htlc.state {
1229                                 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1230                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1231                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1232                                 InboundHTLCState::Committed => (true, "Committed"),
1233                                 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1234                         };
1235
1236                         if include {
1237                                 add_htlc_output!(htlc, false, None, state_name);
1238                                 remote_htlc_total_msat += htlc.amount_msat;
1239                         } else {
1240                                 log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
1241                                 match &htlc.state {
1242                                         &InboundHTLCState::LocalRemoved(ref reason) => {
1243                                                 if generated_by_local {
1244                                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1245                                                                 value_to_self_msat_offset += htlc.amount_msat as i64;
1246                                                         }
1247                                                 }
1248                                         },
1249                                         _ => {},
1250                                 }
1251                         }
1252                 }
1253
1254                 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1255
1256                 for ref htlc in self.pending_outbound_htlcs.iter() {
1257                         let (include, state_name) = match htlc.state {
1258                                 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1259                                 OutboundHTLCState::Committed => (true, "Committed"),
1260                                 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1261                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1262                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1263                         };
1264
1265                         let preimage_opt = match htlc.state {
1266                                 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1267                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1268                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1269                                 _ => None,
1270                         };
1271
1272                         if let Some(preimage) = preimage_opt {
1273                                 preimages.push(preimage);
1274                         }
1275
1276                         if include {
1277                                 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1278                                 local_htlc_total_msat += htlc.amount_msat;
1279                         } else {
1280                                 log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
1281                                 match htlc.state {
1282                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1283                                                 value_to_self_msat_offset -= htlc.amount_msat as i64;
1284                                         },
1285                                         OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1286                                                 if !generated_by_local {
1287                                                         value_to_self_msat_offset -= htlc.amount_msat as i64;
1288                                                 }
1289                                         },
1290                                         _ => {},
1291                                 }
1292                         }
1293                 }
1294
1295                 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1296                 assert!(value_to_self_msat >= 0);
1297                 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1298                 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1299                 // "violate" their reserve value by counting those against it. Thus, we have to convert
1300                 // everything to i64 before subtracting as otherwise we can overflow.
1301                 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1302                 assert!(value_to_remote_msat >= 0);
1303
1304                 #[cfg(debug_assertions)]
1305                 {
1306                         // Make sure that the to_self/to_remote is always either past the appropriate
1307                         // channel_reserve *or* it is making progress towards it.
1308                         let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1309                                 self.holder_max_commitment_tx_output.lock().unwrap()
1310                         } else {
1311                                 self.counterparty_max_commitment_tx_output.lock().unwrap()
1312                         };
1313                         debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1314                         broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1315                         debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1316                         broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1317                 }
1318
1319                 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some());
1320                 let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1321                 let (value_to_self, value_to_remote) = if self.is_outbound() {
1322                         (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1323                 } else {
1324                         (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1325                 };
1326
1327                 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1328                 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1329                 let (funding_pubkey_a, funding_pubkey_b) = if local {
1330                         (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1331                 } else {
1332                         (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1333                 };
1334
1335                 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1336                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1337                 } else {
1338                         value_to_a = 0;
1339                 }
1340
1341                 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1342                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1343                 } else {
1344                         value_to_b = 0;
1345                 }
1346
1347                 let num_nondust_htlcs = included_non_dust_htlcs.len();
1348
1349                 let channel_parameters =
1350                         if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1351                         else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1352                 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1353                                                                              value_to_a as u64,
1354                                                                              value_to_b as u64,
1355                                                                              self.channel_transaction_parameters.opt_anchors.is_some(),
1356                                                                              funding_pubkey_a,
1357                                                                              funding_pubkey_b,
1358                                                                              keys.clone(),
1359                                                                              feerate_per_kw,
1360                                                                              &mut included_non_dust_htlcs,
1361                                                                              &channel_parameters
1362                 );
1363                 let mut htlcs_included = included_non_dust_htlcs;
1364                 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1365                 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1366                 htlcs_included.append(&mut included_dust_htlcs);
1367
1368                 // For the stats, trim the values to 0 msat if they fall below the broadcaster's dust limit
1369                 value_to_self_msat = if value_to_self_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_self_msat };
1370                 value_to_remote_msat = if value_to_remote_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_remote_msat };
1371
1372                 CommitmentStats {
1373                         tx,
1374                         feerate_per_kw,
1375                         total_fee_sat,
1376                         num_nondust_htlcs,
1377                         htlcs_included,
1378                         local_balance_msat: value_to_self_msat as u64,
1379                         remote_balance_msat: value_to_remote_msat as u64,
1380                         preimages
1381                 }
1382         }
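
        // Standalone sketch (not LDK API) of the per-HTLC dust test applied by
        // add_htlc_output! above: on non-anchor channels the broadcaster must also
        // fund the second-stage HTLC-timeout/HTLC-success transaction, so that fee
        // is added to the dust floor; with anchors the second-stage fee is zero.
        // `second_stage_tx_weight` would be htlc_timeout_tx_weight(false) for offered
        // HTLCs and htlc_success_tx_weight(false) for received ones.
        #[cfg(test)]
        fn example_htlc_is_dust(amount_msat: u64, broadcaster_dust_limit_satoshis: u64,
                feerate_per_kw: u32, second_stage_tx_weight: u64, opt_anchors: bool) -> bool
        {
                let htlc_tx_fee = if opt_anchors { 0 } else { feerate_per_kw as u64 * second_stage_tx_weight / 1000 };
                amount_msat / 1000 < broadcaster_dust_limit_satoshis + htlc_tx_fee
        }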
1383
1384         #[inline]
1385         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1386         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1387         /// our counterparty!)
1388         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1389         /// TODO Some magic rust shit to compile-time check this?
1390         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1391                 let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx);
1392                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1393                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1394                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1395
1396                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1397         }
1398
1399         #[inline]
1400         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1401         /// will sign and send to our counterparty.
1402         /// Panics if called before accept_channel/new_from_req, as the counterparty's current per-commitment point is not yet known.
1403         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1404                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1405                 //may see payments to it!
1406                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1407                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1408                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1409
1410                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1411         }
1412
1413         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1414         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1415         /// Panics if called before accept_channel/new_from_req
1416         pub fn get_funding_redeemscript(&self) -> Script {
1417                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1418         }
1419
1420         fn counterparty_funding_pubkey(&self) -> &PublicKey {
1421                 &self.get_counterparty_pubkeys().funding_pubkey
1422         }
1423
1424         pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1425                 self.feerate_per_kw
1426         }
1427
1428         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1429                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1430                 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1431                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1432                 // more dust balance if the feerate increases when we have several HTLCs pending
1433                 // which are near the dust limit.
1434                 let mut feerate_per_kw = self.feerate_per_kw;
1435                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1436                 // potential feerate updates coming soon.
1437                 if let Some((feerate, _)) = self.pending_update_fee {
1438                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1439                 }
1440                 if let Some(feerate) = outbound_feerate_update {
1441                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1442                 }
1443                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1444         }
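
        // Worked example for the buffer above (inputs assumed): at a current feerate
        // of 5_000 sat/kWU the buffered rate is max(2530, 5_000 * 1250 / 1000)
        // = 6_250 sat/kWU, so the +25% branch wins; at 1_000 sat/kWU the
        // 2530 sat/kWU (~10 sat/vB) floor wins instead.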
1445
1446         /// Get forwarding information for the counterparty.
1447         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1448                 self.counterparty_forwarding_info.clone()
1449         }
1450
1451         /// Returns an HTLCStats describing the pending inbound HTLCs
1452         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1453                 let context = self;
1454                 let mut stats = HTLCStats {
1455                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1456                         pending_htlcs_value_msat: 0,
1457                         on_counterparty_tx_dust_exposure_msat: 0,
1458                         on_holder_tx_dust_exposure_msat: 0,
1459                         holding_cell_msat: 0,
1460                         on_holder_tx_holding_cell_htlcs_count: 0,
1461                 };
1462
1463                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
1464                         (0, 0)
1465                 } else {
1466                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1467                         (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
1468                                 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
1469                 };
1470                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1471                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1472                 for ref htlc in context.pending_inbound_htlcs.iter() {
1473                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1474                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1475                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1476                         }
1477                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1478                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1479                         }
1480                 }
1481                 stats
1482         }
1483
1484         /// Returns an HTLCStats describing the pending outbound HTLCs, *including* pending adds in our holding cell.
1485         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1486                 let context = self;
1487                 let mut stats = HTLCStats {
1488                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1489                         pending_htlcs_value_msat: 0,
1490                         on_counterparty_tx_dust_exposure_msat: 0,
1491                         on_holder_tx_dust_exposure_msat: 0,
1492                         holding_cell_msat: 0,
1493                         on_holder_tx_holding_cell_htlcs_count: 0,
1494                 };
1495
1496                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
1497                         (0, 0)
1498                 } else {
1499                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1500                         (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
1501                                 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
1502                 };
1503                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1504                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1505                 for ref htlc in context.pending_outbound_htlcs.iter() {
1506                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1507                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1508                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1509                         }
1510                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1511                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1512                         }
1513                 }
1514
1515                 for update in context.holding_cell_htlc_updates.iter() {
1516                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1517                                 stats.pending_htlcs += 1;
1518                                 stats.pending_htlcs_value_msat += amount_msat;
1519                                 stats.holding_cell_msat += amount_msat;
1520                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1521                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1522                                 }
1523                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1524                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1525                                 } else {
1526                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1527                                 }
1528                         }
1529                 }
1530                 stats
1531         }
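
        // Minimal sketch (not LDK API) of the per-HTLC accounting the two stats
        // helpers above share: an HTLC below the relevant effective dust limit
        // contributes its full msat value to that side's dust exposure.
        #[cfg(test)]
        fn example_accumulate_dust_exposure(amount_msat: u64, effective_dust_limit_sat: u64, dust_exposure_msat: &mut u64) {
                if amount_msat / 1000 < effective_dust_limit_sat {
                        *dust_exposure_msat += amount_msat;
                }
        }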
1532
1533         /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1534         /// Doesn't bother handling the
1535         /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1536         /// corner case properly.
1537         pub fn get_available_balances(&self) -> AvailableBalances {
1538                 let context = &self;
1539                 // Note that we have to handle overflow due to the above case.
1540                 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1541                 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1542
1543                 let mut balance_msat = context.value_to_self_msat;
1544                 for ref htlc in context.pending_inbound_htlcs.iter() {
1545                         if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1546                                 balance_msat += htlc.amount_msat;
1547                         }
1548                 }
1549                 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1550
1551                 let outbound_capacity_msat = context.value_to_self_msat
1552                                 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1553                                 .saturating_sub(
1554                                         context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1555
1556                 let mut available_capacity_msat = outbound_capacity_msat;
1557
1558                 if context.is_outbound() {
1559                         // We should mind channel commit tx fee when computing how much of the available capacity
1560                         // can be used in the next htlc. Mirrors the logic in send_htlc.
1561                         //
1562                         // The fee depends on whether the amount we will be sending is above dust or not,
1563                         // and the answer will in turn change the amount itself, making it a circular
1564                         // dependency.
1565                         // This complicates the computation around dust-values, up to the one-htlc-value.
1566                         let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1567                         if !context.opt_anchors() {
1568                                 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
1569                         }
1570
1571                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1572                         let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1573                         let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1574                         let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1575
1576                         // We will first subtract the fee as if we were above-dust. Then, if the resulting
1577                         // value ends up being below dust, we have this fee available again. In that case,
1578                         // match the value to right-below-dust.
1579                         let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
1580                         if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1581                                 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1582                                 debug_assert!(one_htlc_difference_msat != 0);
1583                                 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1584                                 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1585                                 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1586                         } else {
1587                                 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1588                         }
1589                 } else {
1590                         // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1591                         // sending a new HTLC won't reduce their balance below our reserve threshold.
1592                         let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1593                         if !context.opt_anchors() {
1594                                 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
1595                         }
1596
1597                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1598                         let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1599
1600                         let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1601                         let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1602                                 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1603
1604                         if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
1605                                 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1606                                 // we've selected for them, we can only send dust HTLCs.
1607                                 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1608                         }
1609                 }
1610
1611                 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1612
1613                 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1614                 // between zero and the remaining dust exposure limit, OR above the dust limit.
1615                 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1616                 // send above the dust limit (as the router can always overpay to meet the dust limit).
1617                 let mut remaining_msat_below_dust_exposure_limit = None;
1618                 let mut dust_exposure_dust_limit_msat = 0;
1619
1620                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
1621                         (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1622                 } else {
1623                         let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1624                         (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
1625                          context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
1626                 };
1627                 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1628                 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
1629                         remaining_msat_below_dust_exposure_limit =
1630                                 Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1631                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1632                 }
1633
1634                 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1635                 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
1636                         remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1637                                 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1638                                 context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
1639                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1640                 }
1641
1642                 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1643                         if available_capacity_msat < dust_exposure_dust_limit_msat {
1644                                 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1645                         } else {
1646                                 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1647                         }
1648                 }
1649
1650                 available_capacity_msat = cmp::min(available_capacity_msat,
1651                         context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1652
1653                 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1654                         available_capacity_msat = 0;
1655                 }
1656
1657                 AvailableBalances {
1658                         inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1659                                         - context.value_to_self_msat as i64
1660                                         - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1661                                         - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1662                                 0) as u64,
1663                         outbound_capacity_msat,
1664                         next_outbound_htlc_limit_msat: available_capacity_msat,
1665                         next_outbound_htlc_minimum_msat,
1666                         balance_msat,
1667                 }
1668         }
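
        // Worked example for the core outbound-capacity term above (numbers assumed):
        // with value_to_self = 500_000_000 msat, 20_000_000 msat already pending
        // outbound, and a 10_000 sat reserve selected by the counterparty, the base
        // outbound capacity is 500_000_000 - 20_000_000 - 10_000_000 = 470_000_000 msat,
        // before the commitment-fee and dust-exposure adjustments are applied.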
1669
1670         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1671                 let context = &self;
1672                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1673         }
1674
1675         /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1676         /// number of pending HTLCs that are on track to be in our next commitment tx.
1677         ///
1678         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1679         /// `fee_spike_buffer_htlc` is `Some`.
1680         ///
1681         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1682         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1683         ///
1684         /// Dust HTLCs are excluded.
1685         fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1686                 let context = &self;
1687                 assert!(context.is_outbound());
1688
1689                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
1690                         (0, 0)
1691                 } else {
1692                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
1693                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
1694                 };
1695                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1696                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1697
1698                 let mut addl_htlcs = 0;
1699                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1700                 match htlc.origin {
1701                         HTLCInitiator::LocalOffered => {
1702                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1703                                         addl_htlcs += 1;
1704                                 }
1705                         },
1706                         HTLCInitiator::RemoteOffered => {
1707                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1708                                         addl_htlcs += 1;
1709                                 }
1710                         }
1711                 }
1712
1713                 let mut included_htlcs = 0;
1714                 for ref htlc in context.pending_inbound_htlcs.iter() {
1715                         if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1716                                 continue
1717                         }
1718                         // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1719                         // transaction including this HTLC if it times out before they RAA.
1720                         included_htlcs += 1;
1721                 }
1722
1723                 for ref htlc in context.pending_outbound_htlcs.iter() {
1724                         if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1725                                 continue
1726                         }
1727                         match htlc.state {
1728                                 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1729                                 OutboundHTLCState::Committed => included_htlcs += 1,
1730                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1731                                 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1732                                 // transaction won't be generated until they send us their next RAA, which will mean
1733                                 // dropping any HTLCs in this state.
1734                                 _ => {},
1735                         }
1736                 }
1737
1738                 for htlc in context.holding_cell_htlc_updates.iter() {
1739                         match htlc {
1740                                 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1741                                         if amount_msat / 1000 < real_dust_limit_timeout_sat {
1742                                                 continue
1743                                         }
1744                                         included_htlcs += 1
1745                                 },
1746                                 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1747                                          // ack we're guaranteed to never include them in commitment txs anymore.
1748                         }
1749                 }
1750
1751                 let num_htlcs = included_htlcs + addl_htlcs;
1752                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
1753                 #[cfg(any(test, fuzzing))]
1754                 {
1755                         let mut fee = res;
1756                         if fee_spike_buffer_htlc.is_some() {
1757                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
1758                         }
1759                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1760                                 + context.holding_cell_htlc_updates.len();
1761                         let commitment_tx_info = CommitmentTxInfoCached {
1762                                 fee,
1763                                 total_pending_htlcs,
1764                                 next_holder_htlc_id: match htlc.origin {
1765                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1766                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1767                                 },
1768                                 next_counterparty_htlc_id: match htlc.origin {
1769                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1770                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1771                                 },
1772                                 feerate: context.feerate_per_kw,
1773                         };
1774                         *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1775                 }
1776                 res
1777         }
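
        // Worked example for the fee computed above, assuming the standard BOLT 3
        // weights (724 WU commitment base without anchors, 172 WU per non-dust
        // HTLC): at 2_500 sat/kWU with 3 non-dust HTLCs the commitment fee is
        // 2_500 * (724 + 3 * 172) / 1000 = 3_100 sat, i.e. 3_100_000 msat as
        // returned here.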
1778
1779         /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1780         /// pending HTLCs that are on track to be in their next commitment tx
1781         ///
1782         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1783         /// `fee_spike_buffer_htlc` is `Some`.
1784         ///
1785         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1786         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1787         ///
1788         /// Dust HTLCs are excluded.
1789         fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1790                 let context = &self;
1791                 assert!(!context.is_outbound());
1792
1793                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
1794                         (0, 0)
1795                 } else {
1796                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
1797                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
1798                 };
1799                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1800                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1801
1802                 let mut addl_htlcs = 0;
1803                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1804                 match htlc.origin {
1805                         HTLCInitiator::LocalOffered => {
1806                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1807                                         addl_htlcs += 1;
1808                                 }
1809                         },
1810                         HTLCInitiator::RemoteOffered => {
1811                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1812                                         addl_htlcs += 1;
1813                                 }
1814                         }
1815                 }
1816
1817                 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
1818                 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
1819                 // committed outbound HTLCs, see below.
1820                 let mut included_htlcs = 0;
1821                 for ref htlc in context.pending_inbound_htlcs.iter() {
1822                         if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
1823                                 continue
1824                         }
1825                         included_htlcs += 1;
1826                 }
1827
1828                 for ref htlc in context.pending_outbound_htlcs.iter() {
1829                         if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
1830                                 continue
1831                         }
1832                         // We only include outbound HTLCs if it will not be included in their next commitment_signed,
1833                         // i.e. if they've responded to us with an RAA after announcement.
1834                         match htlc.state {
1835                                 OutboundHTLCState::Committed => included_htlcs += 1,
1836                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1837                                 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
1838                                 _ => {},
1839                         }
1840                 }
1841
1842                 let num_htlcs = included_htlcs + addl_htlcs;
1843                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
1844                 #[cfg(any(test, fuzzing))]
1845                 {
1846                         let mut fee = res;
1847                         if fee_spike_buffer_htlc.is_some() {
1848                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
1849                         }
1850                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
1851                         let commitment_tx_info = CommitmentTxInfoCached {
1852                                 fee,
1853                                 total_pending_htlcs,
1854                                 next_holder_htlc_id: match htlc.origin {
1855                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1856                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1857                                 },
1858                                 next_counterparty_htlc_id: match htlc.origin {
1859                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1860                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1861                                 },
1862                                 feerate: context.feerate_per_kw,
1863                         };
1864                         *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1865                 }
1866                 res
1867         }
1868
1869         /// Returns the transaction if there is a pending funding transaction that has yet to be broadcast.
1870         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
1871                 if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
1872                         self.funding_transaction.clone()
1873                 } else {
1874                         None
1875                 }
1876         }
1877 }
1878
1879 // Internal utility functions for channels
1880
1881 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
1882 /// `channel_value_satoshis` in msat, set through
1883 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
1884 ///
1885 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
1886 ///
1887 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
1888 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
1889         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
1890                 1
1891         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
1892                 100
1893         } else {
1894                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
1895         };
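             // channel_value_satoshis * 1000 (sat -> msat) * configured_percent / 100 (percent -> ratio)
             // simplifies to channel_value_satoshis * 10 * configured_percent.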
1896         channel_value_satoshis * 10 * configured_percent
1897 }
1898
1899 /// Returns a minimum channel reserve value the remote needs to maintain,
1900 /// required by us according to the configured or default
1901 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
1902 ///
1903 /// Guaranteed to return a value no larger than `channel_value_satoshis`.
1904 ///
1905 /// This is used for both outbound and inbound channels, and has a lower bound
1906 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
1907 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
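             // E.g. their_channel_reserve_proportional_millionths = 10_000 (1%) on a 1_000_000 sat channel
             // yields a 10_000 sat reserve, floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS and capped at the
             // full channel value.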
1908         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
1909         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
1910 }
1911
1912 /// This is for legacy reasons, present for forward-compatibility.
1913 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the
1914 /// default from storage. Hence, we use this function to avoid persisting default values of
1915 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
1916 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
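             // The pre-0.0.104 default: 1% of the channel value, with a 1000 sat floor (capped at the full
             // channel value for tiny channels).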
1917         let (q, _) = channel_value_satoshis.overflowing_div(100);
1918         cmp::min(channel_value_satoshis, cmp::max(q, 1000))
1919 }
1920
1921 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
1922 // Note that num_htlcs should not include dust HTLCs.
1923 #[inline]
1924 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
1925         feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
1926 }
1927
1928 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
1929 // Note that num_htlcs should not include dust HTLCs.
1930 fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
1931         // Note that we need to divide before multiplying to round properly,
1932         // since the lowest denomination of bitcoin on-chain is the satoshi.
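             // E.g. a 724-weight commitment transaction at feerate_per_kw = 253 pays 724 * 253 / 1000 = 183 sat,
             // returned here as 183_000 msat rather than the un-truncated 183_172.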
1933         (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
1934 }
1935
1936 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
1937 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
1938 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
1939 // inbound channel.
1940 //
1941 // Holder designates channel data owned for the benefit of the user client.
1942 // Counterparty designates channel data owned by the other channel participant.
1943 pub(super) struct Channel<Signer: ChannelSigner> {
1944         pub context: ChannelContext<Signer>,
1945 }
1946
1947 #[cfg(any(test, fuzzing))]
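     /// Info about the latest prospective commitment transaction fee calculation, cached so tests
     /// and fuzzing can cross-check it against the commitment transaction we actually build.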
1948 struct CommitmentTxInfoCached {
1949         fee: u64,
1950         total_pending_htlcs: usize,
1951         next_holder_htlc_id: u64,
1952         next_counterparty_htlc_id: u64,
1953         feerate: u32,
1954 }
1955
1956 impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
1957         /// If we receive an error message, it may only be a rejection of the channel type we tried,
1958         /// not of our ability to open any channel at all. Thus, on error, we should first call this
1959         /// and see if we get a new `OpenChannel` message to retry with; otherwise the channel is failed.
1960         pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
1961                 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
1962                 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
1963                         // We've exhausted our options
1964                         return Err(());
1965                 }
1966                 // We support opening a few different types of channels. Try removing our additional
1967                 // features one by one until we've either arrived at our default or the counterparty has
1968                 // accepted one.
1969                 //
1970                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
1971                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
1972                 // checks whether the counterparty supports every feature, this would only happen if the
1973                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
1974                 // whatever reason.
1975                 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
1976                         self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
1977                         assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
1978                         self.context.channel_transaction_parameters.opt_anchors = None;
1979                 } else if self.context.channel_type.supports_scid_privacy() {
1980                         self.context.channel_type.clear_scid_privacy();
1981                 } else {
1982                         self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
1983                 }
1984                 Ok(self.get_open_channel(chain_hash))
1985         }
1986
1987         // Constructors:
1988
1989         fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
1990                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
1991                 -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
1992         {
1993                 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
1994                 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
1995                 // We generally don't care too much if they set the feerate to something very high, but it
1996                 // could result in the channel being useless due to everything being dust.
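                     // A virtual byte is 4 weight units, so 1 sat/vByte is 250 sat per 1000 weight; 250 * 25
                     // thus corresponds to the 25 sat/vByte bound described above.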
1997                 let upper_limit = cmp::max(250 * 25,
1998                         fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
1999                 if feerate_per_kw as u64 > upper_limit {
2000                         return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2001                 }
2002                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
2003                 // Some fee estimators round up to the next full sat/vByte (i.e. 250 sat per kW), causing
2004                 // occasional issues with feerate disagreements between an initiator that wants a feerate
2005                 // of 1.1 sat/vByte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
2006                 // sat/kW before the comparison here.
2007                 if feerate_per_kw + 250 < lower_limit {
2008                         if let Some(cur_feerate) = cur_feerate_per_kw {
2009                                 if feerate_per_kw > cur_feerate {
2010                                         log_warn!(logger,
2011                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2012                                                 cur_feerate, feerate_per_kw);
2013                                         return Ok(());
2014                                 }
2015                         }
2016                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
2017                 }
2018                 Ok(())
2019         }
2020
2021         #[inline]
2022         fn get_closing_scriptpubkey(&self) -> Script {
2023                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2024                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2025                 // outside of those situations will fail.
2026                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2027         }
2028
2029         #[inline]
2030         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
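                     // Weight accounting: non-witness bytes count as 4 weight units each while witness bytes
                     // count as 1, hence the * 4 on the non-witness parts below.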
2031                 let mut ret =
2032                 (4 +                                                   // version
2033                  1 +                                                   // input count
2034                  36 +                                                  // prevout
2035                  1 +                                                   // script length (0)
2036                  4 +                                                   // sequence
2037                  1 +                                                   // output count
2038                  4                                                     // lock time
2039                  )*4 +                                                 // * 4 for non-witness parts
2040                 2 +                                                    // witness marker and flag
2041                 1 +                                                    // witness element count
2042                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2043                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2044                 2*(1 + 71);                                            // two signatures + sighash type flags
2045                 if let Some(spk) = a_scriptpubkey {
2046                         ret += ((8+1) +                                    // output values and script length
2047                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2048                 }
2049                 if let Some(spk) = b_scriptpubkey {
2050                         ret += ((8+1) +                                    // output values and script length
2051                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2052                 }
2053                 ret
2054         }
2055
2056         #[inline]
2057         fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2058                 assert!(self.context.pending_inbound_htlcs.is_empty());
2059                 assert!(self.context.pending_outbound_htlcs.is_empty());
2060                 assert!(self.context.pending_update_fee.is_none());
2061
2062                 let mut total_fee_satoshis = proposed_total_fee_satoshis;
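                     // The channel funder pays the closing transaction fee, so it is deducted from the
                     // funder's balance below.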
2063                 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2064                 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2065
2066                 if value_to_holder < 0 {
2067                         assert!(self.context.is_outbound());
2068                         total_fee_satoshis += (-value_to_holder) as u64;
2069                 } else if value_to_counterparty < 0 {
2070                         assert!(!self.context.is_outbound());
2071                         total_fee_satoshis += (-value_to_counterparty) as u64;
2072                 }
2073
2074                 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2075                         value_to_counterparty = 0;
2076                 }
2077
2078                 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2079                         value_to_holder = 0;
2080                 }
2081
2082                 assert!(self.context.shutdown_scriptpubkey.is_some());
2083                 let holder_shutdown_script = self.get_closing_scriptpubkey();
2084                 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2085                 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2086
2087                 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2088                 (closing_transaction, total_fee_satoshis)
2089         }
2090
2091         fn funding_outpoint(&self) -> OutPoint {
2092                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2093         }
2094
2095         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2096         /// entirely.
2097         ///
2098         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2099         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2100         ///
2101         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2102         /// disconnected).
2103         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2104                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2105         where L::Target: Logger {
2106                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2107                 // (see equivalent if condition there).
2108                 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2109                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2110                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2111                 self.context.latest_monitor_update_id = mon_update_id;
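                     // Restoring latest_monitor_update_id drops the monitor update built inside
                     // get_update_fulfill_htlc on the floor; the caller instead applies the preimage to the
                     // ChannelMonitor out-of-band, as described above.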
2112                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2113                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2114                 }
2115         }
2116
2117         fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2118                 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2119                 // caller thought we could have something claimed (because we wouldn't have accepted an
2120                 // incoming HTLC in the first place). If we got to ShutdownComplete, callers aren't
2121                 // allowed to call us, either.
2122                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2123                         panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2124                 }
2125                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2126
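                     // An HTLC's payment hash is the SHA256 of its preimage, so recompute it here to
                     // sanity-check the claim against the stored HTLC below.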
2127                 let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
2128
2129                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2130                 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2131                 // these, but for now we just have to treat them as normal.
2132
2133                 let mut pending_idx = core::usize::MAX;
2134                 let mut htlc_value_msat = 0;
2135                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2136                         if htlc.htlc_id == htlc_id_arg {
2137                                 assert_eq!(htlc.payment_hash, payment_hash_calc);
2138                                 match htlc.state {
2139                                         InboundHTLCState::Committed => {},
2140                                         InboundHTLCState::LocalRemoved(ref reason) => {
2141                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2142                                                 } else {
2143                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
2144                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2145                                                 }
2146                                                 return UpdateFulfillFetch::DuplicateClaim {};
2147                                         },
2148                                         _ => {
2149                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2150                                                 // Don't return in release mode here so that we can update channel_monitor
2151                                         }
2152                                 }
2153                                 pending_idx = idx;
2154                                 htlc_value_msat = htlc.amount_msat;
2155                                 break;
2156                         }
2157                 }
2158                 if pending_idx == core::usize::MAX {
2159                         #[cfg(any(test, fuzzing))]
2160                         // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2161                         // this is simply a duplicate claim, not a previously-failed HTLC (which would mean we lost funds).
2162                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2163                         return UpdateFulfillFetch::DuplicateClaim {};
2164                 }
2165
2166                 // Now update local state:
2167                 //
2168                 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2169                 // can claim it even if the channel hits the chain before we see their next commitment.
2170                 self.context.latest_monitor_update_id += 1;
2171                 let monitor_update = ChannelMonitorUpdate {
2172                         update_id: self.context.latest_monitor_update_id,
2173                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2174                                 payment_preimage: payment_preimage_arg.clone(),
2175                         }],
2176                 };
2177
2178                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2179                         // Note that this condition is the same as the assertion in
2180                         // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2181                         // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2182                         // did not get into this branch.
2183                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2184                                 match pending_update {
2185                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2186                                                 if htlc_id_arg == htlc_id {
2187                                                         // Make sure we don't leave latest_monitor_update_id incremented here:
2188                                                         self.context.latest_monitor_update_id -= 1;
2189                                                         #[cfg(any(test, fuzzing))]
2190                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2191                                                         return UpdateFulfillFetch::DuplicateClaim {};
2192                                                 }
2193                                         },
2194                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2195                                                 if htlc_id_arg == htlc_id {
2196                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
2197                                                         // TODO: We may actually be able to switch to a fulfill here, though it's
2198                                                         // rare enough that it may not be worth the complexity burden.
2199                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2200                                                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2201                                                 }
2202                                         },
2203                                         _ => {}
2204                                 }
2205                         }
2206                         log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
2207                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2208                                 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2209                         });
2210                         #[cfg(any(test, fuzzing))]
2211                         self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2212                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2213                 }
2214                 #[cfg(any(test, fuzzing))]
2215                 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2216
2217                 {
2218                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2219                         if let InboundHTLCState::Committed = htlc.state {
2220                         } else {
2221                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2222                                 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2223                         }
2224                         log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
2225                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2226                 }
2227
2228                 UpdateFulfillFetch::NewClaim {
2229                         monitor_update,
2230                         htlc_value_msat,
2231                         msg: Some(msgs::UpdateFulfillHTLC {
2232                                 channel_id: self.context.channel_id(),
2233                                 htlc_id: htlc_id_arg,
2234                                 payment_preimage: payment_preimage_arg,
2235                         }),
2236                 }
2237         }
2238
2239         pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2240                 let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
2241                 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2242                         UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2243                                 // Even if we aren't supposed to let new monitor updates with commitment state
2244                                 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2245                                 // matter what. Sadly, to push a new monitor update which lands before others
2246                                 // already queued, we have to insert it into the pending queue and update the
2247                                 // update_ids of all the monitor updates that follow.
2248                                 let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
2249                                         let mut additional_update = self.build_commitment_no_status_check(logger);
2250                                         // build_commitment_no_status_check may bump latest_monitor_update_id, but we want
2251                                         // update IDs to increase strictly by one, so reset it to the preimage update's ID here.
2252                                         self.context.latest_monitor_update_id = monitor_update.update_id;
2253                                         monitor_update.updates.append(&mut additional_update.updates);
2254                                         self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
2255                                                 update: monitor_update, blocked: false,
2256                                         });
2257                                         self.context.pending_monitor_updates.len() - 1
2258                                 } else {
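                                             // Insert the preimage update just before the first blocked update, taking over that
                                             // update's ID to keep update IDs contiguous, then shift the ID of every update that
                                             // follows it.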
2259                                         let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
2260                                                 .unwrap_or(self.context.pending_monitor_updates.len());
2261                                         let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
2262                                                 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2263                                         monitor_update.update_id = new_mon_id;
2264                                         self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
2265                                                 update: monitor_update, blocked: false,
2266                                         });
2267                                         for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
2268                                                 held_update.update.update_id += 1;
2269                                         }
2270                                         if msg.is_some() {
2271                                                 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2272                                                 let update = self.build_commitment_no_status_check(logger);
2273                                                 self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
2274                                                         update, blocked: true,
2275                                                 });
2276                                         }
2277                                         insert_pos
2278                                 };
2279                                 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2280                                 UpdateFulfillCommitFetch::NewClaim {
2281                                         monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
2282                                                 .expect("We just pushed the monitor update").update,
2283                                         htlc_value_msat,
2284                                 }
2285                         },
2286                         UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2287                 }
2288         }
2289
2290         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2291         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2292         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2293         /// before we fail backwards.
2294         ///
2295         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
2296         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2297         /// [`ChannelError::Ignore`].
2298         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2299         -> Result<(), ChannelError> where L::Target: Logger {
2300                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2301                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2302         }
2303
2304         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2305         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2306         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2307         /// before we fail backwards.
2308         ///
2309         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2310         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2311         /// [`ChannelError::Ignore`].
2312         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2313         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2314                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2315                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2316                 }
2317                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2318
2319                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2320                 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2321                 // these, but for now we just have to treat them as normal.
2322
2323                 let mut pending_idx = core::usize::MAX;
2324                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2325                         if htlc.htlc_id == htlc_id_arg {
2326                                 match htlc.state {
2327                                         InboundHTLCState::Committed => {},
2328                                         InboundHTLCState::LocalRemoved(ref reason) => {
2329                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2330                                                 } else {
2331                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2332                                                 }
2333                                                 return Ok(None);
2334                                         },
2335                                         _ => {
2336                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2337                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2338                                         }
2339                                 }
2340                                 pending_idx = idx;
2341                         }
2342                 }
2343                 if pending_idx == core::usize::MAX {
2344                         #[cfg(any(test, fuzzing))]
2345                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2346                         // is simply a duplicate fail, not a previously-failed HTLC which we failed back too early.
2347                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2348                         return Ok(None);
2349                 }
2350
2351                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2352                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2353                         force_holding_cell = true;
2354                 }
2355
2356                 // Now update local state:
2357                 if force_holding_cell {
2358                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2359                                 match pending_update {
2360                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2361                                                 if htlc_id_arg == htlc_id {
2362                                                         #[cfg(any(test, fuzzing))]
2363                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2364                                                         return Ok(None);
2365                                                 }
2366                                         },
2367                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2368                                                 if htlc_id_arg == htlc_id {
2369                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2370                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2371                                                 }
2372                                         },
2373                                         _ => {}
2374                                 }
2375                         }
2376                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
2377                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2378                                 htlc_id: htlc_id_arg,
2379                                 err_packet,
2380                         });
2381                         return Ok(None);
2382                 }
2383
2384                 log_trace!(logger, "Failing HTLC ID {} back with an update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
2385                 {
2386                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2387                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2388                 }
2389
2390                 Ok(Some(msgs::UpdateFailHTLC {
2391                         channel_id: self.context.channel_id(),
2392                         htlc_id: htlc_id_arg,
2393                         reason: err_packet
2394                 }))
2395         }
2396
2397         // Message handlers:
2398
2399         pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
2400                 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
2401
2402                 // Check sanity of message fields:
2403                 if !self.context.is_outbound() {
2404                         return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
2405                 }
2406                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
2407                         return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
2408                 }
2409                 if msg.dust_limit_satoshis > 21000000 * 100000000 {
2410                         return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
2411                 }
2412                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
2413                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
2414                 }
2415                 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
2416                         return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
2417                 }
2418                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
2419                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
2420                                 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
2421                 }
2422                 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
2423                 if msg.htlc_minimum_msat >= full_channel_value_msat {
2424                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
2425                 }
2426                 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
2427                 if msg.to_self_delay > max_delay_acceptable {
2428                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
2429                 }
2430                 if msg.max_accepted_htlcs < 1 {
2431                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
2432                 }
2433                 if msg.max_accepted_htlcs > MAX_HTLCS {
2434                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
2435                 }
2436
2437                 // Now check against optional parameters as set by config...
2438                 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
2439                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
2440                 }
2441                 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
2442                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
2443                 }
2444                 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
2445                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
2446                 }
2447                 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
2448                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
2449                 }
2450                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
2451                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
2452                 }
2453                 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
2454                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
2455                 }
2456                 if msg.minimum_depth > peer_limits.max_minimum_depth {
2457                         return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
2458                 }
2459
2460                 if let Some(ty) = &msg.channel_type {
2461                         if *ty != self.context.channel_type {
2462                                 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
2463                         }
2464                 } else if their_features.supports_channel_type() {
2465                         // Assume they've accepted the channel type as they said they understand it.
2466                 } else {
2467                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
2468                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
2469                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
2470                         }
2471                         self.context.channel_type = channel_type;
2472                 }
2473
2474                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
2475                         match &msg.shutdown_scriptpubkey {
2476                                 &Some(ref script) => {
2477                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything.
2478                                         if script.len() == 0 {
2479                                                 None
2480                                         } else {
2481                                                 if !script::is_bolt2_compliant(&script, their_features) {
2482                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
2483                                                 }
2484                                                 Some(script.clone())
2485                                         }
2486                                 },
2487                                 // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
2488                                 &None => {
2489                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
2490                                 }
2491                         }
2492                 } else { None };
2493
2494                 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
2495                 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
2496                 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
2497                 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
2498                 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
2499
2500                 if peer_limits.trust_own_funding_0conf {
2501                         self.context.minimum_depth = Some(msg.minimum_depth);
2502                 } else {
2503                         self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
2504                 }
2505
2506                 let counterparty_pubkeys = ChannelPublicKeys {
2507                         funding_pubkey: msg.funding_pubkey,
2508                         revocation_basepoint: msg.revocation_basepoint,
2509                         payment_point: msg.payment_point,
2510                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
2511                         htlc_basepoint: msg.htlc_basepoint
2512                 };
2513
2514                 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
2515                         selected_contest_delay: msg.to_self_delay,
2516                         pubkeys: counterparty_pubkeys,
2517                 });
2518
2519                 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
2520                 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
2521
2522                 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
2523                 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
2524
2525                 Ok(())
2526         }
2527
2528         fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
2529                 let funding_script = self.context.get_funding_redeemscript();
2530
2531                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2532                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
2533                 {
2534                         let trusted_tx = initial_commitment_tx.trust();
2535                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2536                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2537                         // They sign the holder commitment transaction...
2538                         log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
2539                                 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
2540                                 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
2541                                 encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
2542                         secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
2543                 }
2544
2545                 let counterparty_keys = self.context.build_remote_transaction_keys();
2546                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2547
2548                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2549                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2550                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2551                         log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2552
2553                 let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
2554                                 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
2555
2556                 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2557                 Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
2558         }
2559
2560         pub fn funding_created<SP: Deref, L: Deref>(
2561                 &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
2562         ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
2563         where
2564                 SP::Target: SignerProvider<Signer = Signer>,
2565                 L::Target: Logger
2566         {
2567                 if self.context.is_outbound() {
2568                         return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
2569                 }
2570                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
2571                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
2572                         // remember the channel, so it's safe to just send an error_message here and drop the
2573                         // channel.
2574                         return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
2575                 }
2576                 if self.context.inbound_awaiting_accept {
2577                         return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
2578                 }
2579                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2580                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2581                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2582                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2583                 }
2584
2585                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
2586                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
2587                 // This is an externally observable change before we finish all our checks. In
2588                 // particular, funding_created_signature may fail.
2589                 self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2590
2591                 let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
2592                         Ok(res) => res,
2593                         Err(ChannelError::Close(e)) => {
2594                                 self.context.channel_transaction_parameters.funding_outpoint = None;
2595                                 return Err(ChannelError::Close(e));
2596                         },
2597                         Err(e) => {
2598                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
2599                                 // to make sure we don't continue with an inconsistent state.
2600                                 panic!("unexpected error type from funding_created_signature {:?}", e);
2601                         }
2602                 };
2603
2604                 let holder_commitment_tx = HolderCommitmentTransaction::new(
2605                         initial_commitment_tx,
2606                         msg.signature,
2607                         Vec::new(),
2608                         &self.context.get_holder_pubkeys().funding_pubkey,
2609                         self.context.counterparty_funding_pubkey()
2610                 );
2611
2612                 self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
2613                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2614
2615                 // Now that we're past error-generating stuff, update our local state:
2616
2617                 let funding_redeemscript = self.context.get_funding_redeemscript();
2618                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2619                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2620                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2621                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2622                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2623                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2624                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
2625                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
2626                                                           &self.context.channel_transaction_parameters,
2627                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
2628                                                           obscure_factor,
2629                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
2630
2631                 channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
2632
2633                 self.context.channel_state = ChannelState::FundingSent as u32;
2634                 self.context.channel_id = funding_txo.to_channel_id();
2635                 self.context.cur_counterparty_commitment_transaction_number -= 1;
2636                 self.context.cur_holder_commitment_transaction_number -= 1;
2637
2638                 log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
2639
2640                 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2641                 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2642
2643                 Ok((msgs::FundingSigned {
2644                         channel_id: self.context.channel_id,
2645                         signature,
2646                         #[cfg(taproot)]
2647                         partial_signature_with_nonce: None,
2648                 }, channel_monitor))
2649         }
2650
2651         /// Handles a funding_signed message from the remote end.
2652         /// If this call is successful, broadcast the funding transaction (and not before!)
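        ///
        /// Their signature covers our initial holder commitment transaction; a simplified sketch
        /// of the verification performed below (names as used in this method):
        ///
        /// ```ignore
        /// let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, channel_value_satoshis);
        /// secp_ctx.verify_ecdsa(&sighash, &msg.signature, &counterparty_funding_pubkey)?;
        /// ```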
2653         pub fn funding_signed<SP: Deref, L: Deref>(
2654                 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2655         ) -> Result<ChannelMonitor<Signer>, ChannelError>
2656         where
2657                 SP::Target: SignerProvider<Signer = Signer>,
2658                 L::Target: Logger
2659         {
2660                 if !self.context.is_outbound() {
2661                         return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2662                 }
2663                 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2664                         return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2665                 }
2666                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2667                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2668                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2669                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2670                 }
2671
2672                 let funding_script = self.context.get_funding_redeemscript();
2673
2674                 let counterparty_keys = self.context.build_remote_transaction_keys();
2675                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2676                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2677                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2678
2679                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2680                         log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2681
2682                 let holder_keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2683                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_keys, true, false, logger).tx;
2684                 {
2685                         let trusted_tx = initial_commitment_tx.trust();
2686                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2687                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2688                         // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2689                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2690                                 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2691                         }
2692                 }
2693
2694                 let holder_commitment_tx = HolderCommitmentTransaction::new(
2695                         initial_commitment_tx,
2696                         msg.signature,
2697                         Vec::new(),
2698                         &self.context.get_holder_pubkeys().funding_pubkey,
2699                         self.context.counterparty_funding_pubkey()
2700                 );
2701
2702                 self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
2703                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2704
2705
2706                 let funding_redeemscript = self.context.get_funding_redeemscript();
2707                 let funding_txo = self.context.get_funding_txo().unwrap();
2708                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2709                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2710                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2711                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2712                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2713                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2714                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
2715                                                           &self.context.destination_script, (funding_txo, funding_txo_script),
2716                                                           &self.context.channel_transaction_parameters,
2717                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
2718                                                           obscure_factor,
2719                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
2720
2721                 channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
2722
2723                 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
2724                 self.context.channel_state = ChannelState::FundingSent as u32;
2725                 self.context.cur_holder_commitment_transaction_number -= 1;
2726                 self.context.cur_counterparty_commitment_transaction_number -= 1;
2727
2728                 log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
2729
2730                 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2731                 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2732                 Ok(channel_monitor)
2733         }
2734
2735         /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2736         /// and the channel is now usable (and public), this may generate an announcement_signatures to
2737         /// reply with.
2738         pub fn channel_ready<NS: Deref, L: Deref>(
2739                 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
2740                 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2741         ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2742         where
2743                 NS::Target: NodeSigner,
2744                 L::Target: Logger
2745         {
2746                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2747                         self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2748                         return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2749                 }
2750
2751                 if let Some(scid_alias) = msg.short_channel_id_alias {
2752                         if Some(scid_alias) != self.context.short_channel_id {
2753                                 // The scid alias provided can be used to route payments *from* our counterparty,
2754                                 // i.e. can be used for inbound payments and provided in invoices, but is not used
2755                                 // when routing outbound payments.
2756                                 self.context.latest_inbound_scid_alias = Some(scid_alias);
2757                         }
2758                 }
2759
2760                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2761
2762                 if non_shutdown_state == ChannelState::FundingSent as u32 {
2763                         self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2764                 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2765                         self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2766                         self.context.update_time_counter += 1;
2767                 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2768                         // If we reconnected before sending our `channel_ready` they may still resend theirs:
2769                         (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2770                                               (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2771                 {
2772                         // They probably disconnected/reconnected and re-sent the channel_ready, which is
2773                         // required, or they're sending a fresh SCID alias.
2774                         let expected_point =
2775                                 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2776                                         // If they haven't ever sent an updated point, the point they send should match
2777                                         // the current one.
2778                                         self.context.counterparty_cur_commitment_point
2779                                 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2780                                         // If we've advanced the commitment number once, the second commitment point is
2781                                         // at `counterparty_prev_commitment_point`, which is not yet revoked.
2782                                         debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2783                                         self.context.counterparty_prev_commitment_point
2784                                 } else {
2785                                         // If they have sent updated points, channel_ready is always supposed to match
2786                                         // their "first" point, which we re-derive here.
2787                                         Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2788                                                         &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2789                                                 ).expect("We already advanced, so previous secret keys should have been validated already")))
2790                                 };
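                        // (Re-deriving a point from a revealed secret, as above, is plain scalar-base
                        // multiplication: point = secret * G, i.e. `PublicKey::from_secret_key`.)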
2791                         if expected_point != Some(msg.next_per_commitment_point) {
2792                                 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2793                         }
2794                         return Ok(None);
2795                 } else {
2796                         return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2797                 }
2798
2799                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2800                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2801
2802                 log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
2803
2804                 Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
2805         }
2806
2807         pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
2808         where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
2809                 // We can't accept HTLCs sent after we've sent a shutdown.
2810                 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2811                 if local_sent_shutdown {
2812                         pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2813                 }
2814                 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2815                 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2816                 if remote_sent_shutdown {
2817                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2818                 }
2819                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2820                         return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2821                 }
2822                 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2823                         return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2824                 }
2825                 if msg.amount_msat == 0 {
2826                         return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2827                 }
2828                 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2829                         return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2830                 }
2831
2832                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2833                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2834                 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2835                         return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2836                 }
2837                 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2838                         return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2839                 }
2840                 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2841                 // the reserve_satoshis we told them to always have as a direct payment so that they lose
2842                 // something if we punish them for broadcasting an old state).
2843                 // Note that we don't really care about having a small/no to_remote output in our local
2844                 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2845                 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2846                 // present in the next commitment transaction we send them (at least for fulfilled ones,
2847                 // failed ones won't modify value_to_self).
2848                 // Note that if we do not do this, we will send HTLCs which another instance of
2849                 // rust-lightning would think violate the reserve value (as we forget inbound HTLCs
2850                 // from the Channel state once they are no longer present in the next received
2851                 // commitment transaction).
2852                 let mut removed_outbound_total_msat = 0;
2853                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2854                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2855                                 removed_outbound_total_msat += htlc.amount_msat;
2856                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2857                                 removed_outbound_total_msat += htlc.amount_msat;
2858                         }
2859                 }
2860
2861                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
2862                         (0, 0)
2863                 } else {
2864                         let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2865                         (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
2866                                 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
2867                 };
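                // Worked example with hypothetical numbers: at a dust buffer feerate of 1_000 sat/kW
                // and the non-anchors claimed weights of 663 WU (HTLC-timeout) and 703 WU
                // (HTLC-success), these come to 1_000 * 663 / 1_000 = 663 sats and
                // 1_000 * 703 / 1_000 = 703 sats, added to the respective dust limits below.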
2868                 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2869                 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2870                         let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2871                         if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
2872                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2873                                         on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
2874                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2875                         }
2876                 }
2877
2878                 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2879                 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2880                         let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2881                         if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
2882                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2883                                         on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
2884                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2885                         }
2886                 }
2887
2888                 let pending_value_to_self_msat =
2889                         self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2890                 let pending_remote_value_msat =
2891                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2892                 if pending_remote_value_msat < msg.amount_msat {
2893                         return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2894                 }
2895
2896                 // Check that the remote can afford to pay for this HTLC on-chain at the current
2897                 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2898                 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2899                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2900                         self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
2901                 };
2902                 if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
2903                         return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2904                 }
2905
2906                 if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2907                         return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
2908                 }
2909
2910                 if !self.context.is_outbound() {
2911                         // The `2 *` and `Some(())` are for the fee spike buffer we keep for the remote. This
2912                         // deviates from the spec, which only imposes the fee spike buffer requirement on the
2913                         // sender's side, not the receiver's.
2914                         // Note that when we eventually remove support for fee updates and switch to anchor output
2915                         // fees, we will drop the `2 *`, since we will no longer be as sensitive to fee spikes. But
2916                         // we keep the extra HTLC when calculating the next remote commitment transaction fee, as
2917                         // we should still be able to afford adding this HTLC plus one more future HTLC, regardless
2918                         // of being sensitive to fee spikes.
2919                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2920                         let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2921                         if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
2922                                 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2923                                 // the HTLC, i.e. its status is already set to failing.
2924                                 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
2925                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2926                         }
2927                 } else {
2928                         // Check that they won't violate our local required channel reserve by adding this HTLC.
2929                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2930                         let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2931                         if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
2932                                 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
2933                         }
2934                 }
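                // Rough worked example of the fee spike buffer (hypothetical numbers, non-anchors
                // weights of 724 WU base + 172 WU per HTLC, ignoring already-pending HTLCs): at
                // 2_500 sat/kW, a commitment tx carrying this HTLC plus the buffer HTLC weighs
                // 724 + 2 * 172 = 1_068 WU and costs 2_500 * 1_068 / 1_000 = 2_670 sats, so the
                // doubled buffer demands 5_340 sats (5_340_000 msat) on top of the reserve.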
2935                 if self.context.next_counterparty_htlc_id != msg.htlc_id {
2936                         return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
2937                 }
2938                 if msg.cltv_expiry >= 500000000 {
2939                         return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
2940                 }
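                // (In Bitcoin locktime semantics, values >= 500_000_000 are UNIX timestamps rather
                // than block heights; HTLC CLTVs must be block heights, hence the cutoff above.)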
2941
2942                 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
2943                         if let PendingHTLCStatus::Forward(_) = pending_forward_status {
2944                                 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
2945                         }
2946                 }
2947
2948                 // Now update local state:
2949                 self.context.next_counterparty_htlc_id += 1;
2950                 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
2951                         htlc_id: msg.htlc_id,
2952                         amount_msat: msg.amount_msat,
2953                         payment_hash: msg.payment_hash,
2954                         cltv_expiry: msg.cltv_expiry,
2955                         state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
2956                 });
2957                 Ok(())
2958         }
2959
2960         /// Marks an outbound HTLC as removed after we've received an update_fulfill/fail/fail_malformed message for it
2961         #[inline]
2962         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
2963                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
2964                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
2965                         if htlc.htlc_id == htlc_id {
2966                                 let outcome = match check_preimage {
2967                                         None => fail_reason.into(),
2968                                         Some(payment_preimage) => {
2969                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
2970                                                 if payment_hash != htlc.payment_hash {
2971                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
2972                                                 }
2973                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
2974                                         }
2975                                 };
2976                                 match htlc.state {
2977                                         OutboundHTLCState::LocalAnnounced(_) =>
2978                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
2979                                         OutboundHTLCState::Committed => {
2980                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
2981                                         },
2982                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
2983                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
2984                                 }
2985                                 return Ok(htlc);
2986                         }
2987                 }
2988                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
2989         }
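        // For orientation, the outbound HTLC state ladder walked above is roughly:
        //   LocalAnnounced -> Committed -> RemoteRemoved
        //     -> AwaitingRemoteRevokeToRemove -> AwaitingRemovedRemoteRevoke
        // and only HTLCs in `Committed` may legally be fulfilled or failed by the counterparty.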
2990
2991         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2992                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2993                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2994                 }
2995                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2996                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
2997                 }
2998
2999                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3000         }
3001
3002         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3003                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3004                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3005                 }
3006                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3007                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3008                 }
3009
3010                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3011                 Ok(())
3012         }
3013
3014         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3015                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3016                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3017                 }
3018                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3019                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3020                 }
3021
3022                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3023                 Ok(())
3024         }
3025
3026         pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
3027                 where L::Target: Logger
3028         {
3029                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3030                         return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3031                 }
3032                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3033                         return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3034                 }
3035                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3036                         return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3037                 }
3038
3039                 let funding_script = self.context.get_funding_redeemscript();
3040
3041                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3042
3043                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3044                 let commitment_txid = {
3045                         let trusted_tx = commitment_stats.tx.trust();
3046                         let bitcoin_tx = trusted_tx.built_transaction();
3047                         let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3048
3049                         log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3050                                 log_bytes!(msg.signature.serialize_compact()[..]),
3051                                 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3052                                 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
3053                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3054                                 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3055                         }
3056                         bitcoin_tx.txid
3057                 };
3058                 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.cloned())).collect();
3059
3060                 // If our counterparty updated the channel fee in this commitment transaction, check that
3061                 // they can actually afford the new fee now.
3062                 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3063                         update_state == FeeUpdateState::RemoteAnnounced
3064                 } else { false };
3065                 if update_fee {
3066                         debug_assert!(!self.context.is_outbound());
3067                         let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3068                         if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3069                                 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3070                         }
3071                 }
3072                 #[cfg(any(test, fuzzing))]
3073                 {
3074                         if self.context.is_outbound() {
3075                                 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3076                                 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3077                                 if let Some(info) = projected_commit_tx_info {
3078                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3079                                                 + self.context.holding_cell_htlc_updates.len();
3080                                         if info.total_pending_htlcs == total_pending_htlcs
3081                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3082                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3083                                                 && info.feerate == self.context.feerate_per_kw {
3084                                                         assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3085                                                 }
3086                                 }
3087                         }
3088                 }
3089
3090                 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3091                         return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3092                 }
3093
3094                 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3095                 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3096                 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3097                 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3098                 // backwards compatibility, we never use it in production. To provide test coverage, here,
3099                 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3100                 #[allow(unused_assignments, unused_mut)]
3101                 let mut separate_nondust_htlc_sources = false;
3102                 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3103                         use core::hash::{BuildHasher, Hasher};
3104                         // Get a random value using the only std API to do so - the DefaultHasher
3105                         let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3106                         separate_nondust_htlc_sources = rand_val % 2 == 0;
3107                 }
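                // (`RandomState` is effectively randomly seeded per instance, so hashing nothing and
                // taking the parity of the result acts as a cheap coin flip in std-enabled builds.)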
3108
3109                 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3110                 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3111                 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3112                         if let Some(_) = htlc.transaction_output_index {
3113                                 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3114                                         self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
3115                                         false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3116
3117                                 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
3118                                 let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3119                                 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3120                                 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3121                                         log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
3122                                         encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
3123                                 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
3124                                         return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3125                                 }
3126                                 if !separate_nondust_htlc_sources {
3127                                         htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3128                                 }
3129                         } else {
3130                                 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3131                         }
3132                         if separate_nondust_htlc_sources {
3133                                 if let Some(source) = source_opt.take() {
3134                                         nondust_htlc_sources.push(source);
3135                                 }
3136                         }
3137                         debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3138                 }
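                // (Dust HTLCs have no `transaction_output_index` and thus no HTLC transaction to
                // sign, which is why they were pushed with `None` in place of a signature above.)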
3139
3140                 let holder_commitment_tx = HolderCommitmentTransaction::new(
3141                         commitment_stats.tx,
3142                         msg.signature,
3143                         msg.htlc_signatures.clone(),
3144                         &self.context.get_holder_pubkeys().funding_pubkey,
3145                         self.context.counterparty_funding_pubkey()
3146                 );
3147
3148                 self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3149                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3150
3151                 // Update state now that we've passed all the can-fail calls...
3152                 let mut need_commitment = false;
3153                 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3154                         if *update_state == FeeUpdateState::RemoteAnnounced {
3155                                 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3156                                 need_commitment = true;
3157                         }
3158                 }
3159
3160                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3161                         let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3162                                 Some(forward_info.clone())
3163                         } else { None };
3164                         if let Some(forward_info) = new_forward {
3165                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3166                                         log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
3167                                 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3168                                 need_commitment = true;
3169                         }
3170                 }
3171                 let mut claimed_htlcs = Vec::new();
3172                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3173                         if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3174                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3175                                         log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
3176                                 // Grab the preimage, if it exists, instead of cloning
3177                                 let mut reason = OutboundHTLCOutcome::Success(None);
3178                                 mem::swap(outcome, &mut reason);
3179                                 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3180                                         // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3181                                         // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3182                                         // have a `Success(None)` reason. In this case we could forget some HTLC
3183                                         // claims, but such an upgrade is unlikely and including claimed HTLCs here
3184                                         // fixes a bug which the user was exposed to on 0.0.104 when they started the
3185                                         // claim anyway.
3186                                         claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3187                                 }
3188                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3189                                 need_commitment = true;
3190                         }
3191                 }
3192
3193                 self.context.latest_monitor_update_id += 1;
3194                 let mut monitor_update = ChannelMonitorUpdate {
3195                         update_id: self.context.latest_monitor_update_id,
3196                         updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3197                                 commitment_tx: holder_commitment_tx,
3198                                 htlc_outputs: htlcs_and_sigs,
3199                                 claimed_htlcs,
3200                                 nondust_htlc_sources,
3201                         }]
3202                 };
3203
3204                 self.context.cur_holder_commitment_transaction_number -= 1;
3205                 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3206                 // build_commitment_no_status_check() next, which will reset this to RAAFirst.
3207                 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3208
3209                 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3210                         // In case we initially failed monitor updating without requiring a response, we need
3211                         // to make sure the RAA gets sent first.
3212                         self.context.monitor_pending_revoke_and_ack = true;
3213                         if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3214                                 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3215                                 // the corresponding HTLC status updates so that get_last_commitment_update
3216                                 // includes the right HTLCs.
3217                                 self.context.monitor_pending_commitment_signed = true;
3218                                 let mut additional_update = self.build_commitment_no_status_check(logger);
3219                                 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
3220                                 // the update_ids to be strictly increasing by one, so decrement it here.
3221                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3222                                 monitor_update.updates.append(&mut additional_update.updates);
3223                         }
3224                         log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3225                                 log_bytes!(self.context.channel_id));
3226                         return Ok(self.push_ret_blockable_mon_update(monitor_update));
3227                 }
3228
3229                 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3230                         // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3231                         // we'll send one right away when we get the revoke_and_ack when we
3232                         // free_holding_cell_htlcs().
3233                         let mut additional_update = self.build_commitment_no_status_check(logger);
3234                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want
3235                         // the update_ids to be strictly increasing by one, so decrement it here.
3236                         self.context.latest_monitor_update_id = monitor_update.update_id;
3237                         monitor_update.updates.append(&mut additional_update.updates);
3238                         true
3239                 } else { false };
3240
3241                 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3242                         log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3243                 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3244                 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3245         }
3246
3247         /// Public version of `free_holding_cell_htlcs` below, checking relevant preconditions first.
3248         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3249         /// returns `(None, Vec::new())`.
3250         pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
3251                 if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
3252                    (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3253                         self.free_holding_cell_htlcs(logger)
3254                 } else { (None, Vec::new()) }
3255         }
3256
3257         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3258         /// for our counterparty.
3259         fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
3260                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3261                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3262                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3263                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
3264
3265                         let mut monitor_update = ChannelMonitorUpdate {
3266                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3267                                 updates: Vec::new(),
3268                         };
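                        // (The +1 here is provisional: `latest_monitor_update_id` itself is only
                        // bumped later, once we know this update will actually be handed out.)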
3269
3270                         let mut htlc_updates = Vec::new();
3271                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3272                         let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
3273                         let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
3274                         let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
3275                         let mut htlcs_to_fail = Vec::new();
3276                         for htlc_update in htlc_updates.drain(..) {
3277                                 // Note that this *can* fail, though failures should only arise from rather-rare
3278                                 // races between fee updates and adding too many outputs, which push our total
3279                                 // payments just over the limit. If this turns out to be less rare than anticipated,
3280                                 // we may want to revisit handling this case better, perhaps fulfilling some of the
3281                                 // HTLCs while attempting to rebalance channels.
3282                                 match &htlc_update {
3283                                         &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
3284                                                 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
3285                                                         Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
3286                                                         Err(e) => {
3287                                                                 match e {
3288                                                                         ChannelError::Ignore(ref msg) => {
3289                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
3290                                                                                         log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
3291                                                                                 // If we fail to send here, then this HTLC should
3292                                                                                 // be failed backwards. Failing to send here
3293                                                                                 // indicates that this HTLC may keep being put back
3294                                                                                 // into the holding cell without ever being
3295                                                                                 // successfully forwarded/failed/fulfilled, causing
3296                                                                                 // our counterparty to eventually close on us.
3297                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3298                                                                         },
3299                                                                         _ => {
3300                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3301                                                                         },
3302                                                                 }
3303                                                         }
3304                                                 }
3305                                         },
3306                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3307                                                 // If an HTLC claim was previously added to the holding cell (via
3308                                                 // `get_update_fulfill_htlc`), then generating the claim message itself must
3309                                                 // not fail - any in-between attempts to claim the HTLC will have resulted
3310                                                 // in it hitting the holding cell again and we cannot change the state of a
3311                                                 // holding cell HTLC from fulfill to anything else.
3312                                                 let (update_fulfill_msg_option, mut additional_monitor_update) =
3313                                                         if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
3314                                                                 (msg, monitor_update)
3315                                                         } else { unreachable!() };
3316                                                 update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
3317                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3318                                         },
3319                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3320                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3321                                                         Ok(update_fail_msg_option) => {
3322                                                                 // If an HTLC failure was previously added to the holding cell (via
3323                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3324                                                                 // not fail - we should never end up in a state where we double-fail
3325                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3326                                                                 // for a full revocation before failing.
3327                                                                 update_fail_htlcs.push(update_fail_msg_option.unwrap())
3328                                                         },
3329                                                         Err(e) => {
3330                                                                 if let ChannelError::Ignore(_) = e {}
3331                                                                 else {
3332                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3333                                                                 }
3334                                                         }
3335                                                 }
3336                                         },
3337                                 }
3338                         }
3339                         if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
3340                                 return (None, htlcs_to_fail);
3341                         }
3342                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3343                                 self.send_update_fee(feerate, false, logger)
3344                         } else {
3345                                 None
3346                         };
3347
3348                         let mut additional_update = self.build_commitment_no_status_check(logger);
3349                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3350                         // but we want them to be strictly increasing by one, so reset it here.
3351                         self.context.latest_monitor_update_id = monitor_update.update_id;
3352                         monitor_update.updates.append(&mut additional_update.updates);
3353
3354                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3355                                 log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
3356                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
3357
3358                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3359                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3360                 } else {
3361                         (None, Vec::new())
3362                 }
3363         }
3364
3365         /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3366         /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3367         /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3368         /// generating an appropriate error *after* the channel state has been updated based on the
3369         /// revoke_and_ack message.
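        /// Rough shape of the possible outcomes (a sketch of the branches below): a bare
        /// monitor update when nothing further needs sending, a monitor update plus a freed
        /// holding cell, or a monitor update carrying a fresh commitment when
        /// `require_commitment` is set by the HTLC/fee state transitions.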
3370         pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
3371                 where L::Target: Logger,
3372         {
3373                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3374                         return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3375                 }
3376                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3377                         return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3378                 }
3379                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3380                         return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3381                 }
3382
3383                 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3384
3385                 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3386                         if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3387                                 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3388                         }
3389                 }
3390
3391                 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3392                         // Our counterparty seems to have burned their coins to us (by revoking a state when we
3393                         // haven't given them a new commitment transaction to broadcast). We should probably
3394                         // take advantage of this by updating our channel monitor, sending them an error, and
3395                         // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3396                         // lot of work, and there's some chance this is all a misunderstanding anyway.
3397                         // We have to do *something*, though, since our signer may get mad at us for otherwise
3398                         // jumping a remote commitment number, so best to just force-close and move on.
3399                         return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3400                 }
3401
3402                 #[cfg(any(test, fuzzing))]
3403                 {
3404                         *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3405                         *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3406                 }
3407
3408                 self.context.holder_signer.validate_counterparty_revocation(
3409                         self.context.cur_counterparty_commitment_transaction_number + 1,
3410                         &secret
3411                 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3412
3413                 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3414                         .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3415                 self.context.latest_monitor_update_id += 1;
3416                 let mut monitor_update = ChannelMonitorUpdate {
3417                         update_id: self.context.latest_monitor_update_id,
3418                         updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3419                                 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3420                                 secret: msg.per_commitment_secret,
3421                         }],
3422                 };
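                // A note on the indexing above (sketch): commitment transaction numbers count
                // *down* over time, so `cur_counterparty_commitment_transaction_number + 1`
                // identifies the older state whose secret the peer has just revealed.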
3423
3424                 // Update state now that we've passed all the can-fail calls...
3425                 // (note that we may still fail to generate the new commitment_signed message, but that's
3426                 // OK, we step the channel here and *then* if the new generation fails we can fail the
3427                 // channel based on that, but stepping stuff here should be safe either way.)
3428                 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3429                 self.context.sent_message_awaiting_response = None;
3430                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3431                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3432                 self.context.cur_counterparty_commitment_transaction_number -= 1;
3433
3434                 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3435                         self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3436                 }
3437
3438                 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
3439                 let mut to_forward_infos = Vec::new();
3440                 let mut revoked_htlcs = Vec::new();
3441                 let mut finalized_claimed_htlcs = Vec::new();
3442                 let mut update_fail_htlcs = Vec::new();
3443                 let mut update_fail_malformed_htlcs = Vec::new();
3444                 let mut require_commitment = false;
3445                 let mut value_to_self_msat_diff: i64 = 0;
3446
3447                 {
3448                         // Take references explicitly so that we can hold multiple references to self.context.
3449                         let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3450                         let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3451
3452                         // We really shouldn't need two passes here, but retain gives a non-mutable ref (a Rust limitation)
3453                         pending_inbound_htlcs.retain(|htlc| {
3454                                 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3455                                         log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
3456                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3457                                                 value_to_self_msat_diff += htlc.amount_msat as i64;
3458                                         }
3459                                         false
3460                                 } else { true }
3461                         });
3462                         pending_outbound_htlcs.retain(|htlc| {
3463                                 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3464                                         log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
3465                                         if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3466                                                 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3467                                         } else {
3468                                                 finalized_claimed_htlcs.push(htlc.source.clone());
3469                                                 // They fulfilled, so we sent them money
3470                                                 value_to_self_msat_diff -= htlc.amount_msat as i64;
3471                                         }
3472                                         false
3473                                 } else { true }
3474                         });
3475                         for htlc in pending_inbound_htlcs.iter_mut() {
3476                                 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3477                                         true
3478                                 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3479                                         true
3480                                 } else { false };
3481                                 if swap {
3482                                         let mut state = InboundHTLCState::Committed;
3483                                         mem::swap(&mut state, &mut htlc.state);
3484
3485                                         if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3486                                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
3487                                                 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3488                                                 require_commitment = true;
3489                                         } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3490                                                 match forward_info {
3491                                                         PendingHTLCStatus::Fail(fail_msg) => {
3492                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
3493                                                                 require_commitment = true;
3494                                                                 match fail_msg {
3495                                                                         HTLCFailureMsg::Relay(msg) => {
3496                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3497                                                                                 update_fail_htlcs.push(msg)
3498                                                                         },
3499                                                                         HTLCFailureMsg::Malformed(msg) => {
3500                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3501                                                                                 update_fail_malformed_htlcs.push(msg)
3502                                                                         },
3503                                                                 }
3504                                                         },
3505                                                         PendingHTLCStatus::Forward(forward_info) => {
3506                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
3507                                                                 to_forward_infos.push((forward_info, htlc.htlc_id));
3508                                                                 htlc.state = InboundHTLCState::Committed;
3509                                                         }
3510                                                 }
3511                                         }
3512                                 }
3513                         }
3514                         for htlc in pending_outbound_htlcs.iter_mut() {
3515                                 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3516                                         log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
3517                                         htlc.state = OutboundHTLCState::Committed;
3518                                 }
3519                                 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3520                                         log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
3521                                         // Grab the preimage, if it exists, instead of cloning
3522                                         let mut reason = OutboundHTLCOutcome::Success(None);
3523                                         mem::swap(outcome, &mut reason);
3524                                         htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3525                                         require_commitment = true;
3526                                 }
3527                         }
3528                 }
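                // Summary of the promotions performed above (illustrative, mirroring the code):
                //   inbound:  AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke (requires a new commitment)
                //             AwaitingAnnouncedRemoteRevoke  -> Committed (forward) or LocalRemoved (fail)
                //   outbound: LocalAnnounced                 -> Committed
                //             AwaitingRemoteRevokeToRemove   -> AwaitingRemovedRemoteRevoke (requires a new commitment)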
3529                 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3530
3531                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3532                         match update_state {
3533                                 FeeUpdateState::Outbound => {
3534                                         debug_assert!(self.context.is_outbound());
3535                                         log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3536                                         self.context.feerate_per_kw = feerate;
3537                                         self.context.pending_update_fee = None;
3538                                 },
3539                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3540                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3541                                         debug_assert!(!self.context.is_outbound());
3542                                         log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3543                                         require_commitment = true;
3544                                         self.context.feerate_per_kw = feerate;
3545                                         self.context.pending_update_fee = None;
3546                                 },
3547                         }
3548                 }
3549
3550                 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3551                         // We can't actually generate a new commitment transaction (including by freeing holding
3552                         // cells) while we can't update the monitor, so we just return what we have.
3553                         if require_commitment {
3554                                 self.context.monitor_pending_commitment_signed = true;
3555                                 // When the monitor updating is restored we'll call get_last_commitment_update(),
3556                                 // which does not update state, but we're definitely now awaiting a remote revoke
3557                                 // before we can step forward any more, so set it here.
3558                                 let mut additional_update = self.build_commitment_no_status_check(logger);
3559                                 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3560                                 // strictly increasing by one, so reset it here.
3561                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3562                                 monitor_update.updates.append(&mut additional_update.updates);
3563                         }
3564                         self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3565                         self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3566                         self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3567                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
3568                         return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
3569                 }
3570
3571                 match self.free_holding_cell_htlcs(logger) {
3572                         (Some(_), htlcs_to_fail) => {
3573                                 let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
3574                                 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them to be
3575                                 // strictly increasing by one, so reset it here.
3576                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3577                                 monitor_update.updates.append(&mut additional_update.updates);
3578
3579                                 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3580                                 Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3581                         },
3582                         (None, htlcs_to_fail) => {
3583                                 if require_commitment {
3584                                         let mut additional_update = self.build_commitment_no_status_check(logger);
3585
3586                                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3587                                         // strictly increasing by one, so reset it here.
3588                                         self.context.latest_monitor_update_id = monitor_update.update_id;
3589                                         monitor_update.updates.append(&mut additional_update.updates);
3590
3591                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
3592                                                 log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
3593                                         self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3594                                         Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3595                                 } else {
3596                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
3597                                         self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3598                                         Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3599                                 }
3600                         }
3601                 }
3602         }
3603
3604         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3605         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3606         /// commitment update.
3607         pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
3608                 let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
3609                 assert!(msg_opt.is_none(), "We forced holding cell?");
3610         }
3611
3612         /// Adds a pending fee update to this channel. See the doc for [`Self::send_htlc`] for
3613         /// further details on the optionality of the return value.
3614         /// If our balance is too low to cover the cost of the next commitment transaction at the
3615         /// new feerate, the update is cancelled.
3616         ///
3617         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3618         /// [`Channel`] if `force_holding_cell` is false.
3619         fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
3620                 if !self.context.is_outbound() {
3621                         panic!("Cannot send fee from inbound channel");
3622                 }
3623                 if !self.context.is_usable() {
3624                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3625                 }
3626                 if !self.context.is_live() {
3627                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3628                 }
3629
3630                 // Before proposing a feerate update, check that we can actually afford the new fee.
3631                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3632                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3633                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3634                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3635                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
3636                 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3637                 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3638                         //TODO: auto-close after a number of failures?
3639                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3640                         return None;
3641                 }
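                // Illustrative reading of the check above: the buffer fee is computed as if
                // CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional non-dust HTLCs were already
                // present, so a fee update we send should not be invalidated by HTLCs the
                // counterparty adds concurrently.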
3642
3643                 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3644                 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3645                 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3646                 if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
3647                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3648                         return None;
3649                 }
3650                 if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
3651                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3652                         return None;
3653                 }
3654
3655                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3656                         force_holding_cell = true;
3657                 }
3658
3659                 if force_holding_cell {
3660                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
3661                         return None;
3662                 }
3663
3664                 debug_assert!(self.context.pending_update_fee.is_none());
3665                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3666
3667                 Some(msgs::UpdateFee {
3668                         channel_id: self.context.channel_id,
3669                         feerate_per_kw,
3670                 })
3671         }
3672
3673         /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3674         /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3675         /// resent.
3676         /// No further message handling calls may be made until a channel_reestablish dance has
3677         /// completed.
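        /// A rough sketch of the effect (restating the body below): inbound HTLCs still in
        /// `RemoteAnnounced` are dropped entirely, outbound HTLCs in `RemoteRemoved` are
        /// reset to `Committed`, and any fee update the peer announced but never committed
        /// is discarded.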
3678         pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
3679                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3680                 if self.context.channel_state < ChannelState::FundingSent as u32 {
3681                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
3682                         return;
3683                 }
3684
3685                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3686                         // While the below code should be idempotent, it's simpler to just return early, as
3687                         // redundant disconnect events can fire, though they should be rare.
3688                         return;
3689                 }
3690
3691                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3692                         self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3693                 }
3694
3695                 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3696                 // will be retransmitted.
3697                 self.context.last_sent_closing_fee = None;
3698                 self.context.pending_counterparty_closing_signed = None;
3699                 self.context.closing_fee_limits = None;
3700
3701                 let mut inbound_drop_count = 0;
3702                 self.context.pending_inbound_htlcs.retain(|htlc| {
3703                         match htlc.state {
3704                                 InboundHTLCState::RemoteAnnounced(_) => {
3705                                         // They sent us an update_add_htlc but we never got the commitment_signed.
3706                                         // We'll tell them what commitment_signed we're expecting next and they'll drop
3707                                         // this HTLC accordingly
3708                                         inbound_drop_count += 1;
3709                                         false
3710                                 },
3711                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3712                                         // We received a commitment_signed updating this HTLC and (at least hopefully)
3713                                         // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3714                                         // in response to it yet, so don't touch it.
3715                                         true
3716                                 },
3717                                 InboundHTLCState::Committed => true,
3718                                 InboundHTLCState::LocalRemoved(_) => {
3719                                         // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3720                                         // re-transmit if needed) and they may have even sent a revoke_and_ack back
3721                                         // (that we missed). Keep this around for now and if they tell us they missed
3722                                         // the commitment_signed we can re-transmit the update then.
3723                                         true
3724                                 },
3725                         }
3726                 });
3727                 self.context.next_counterparty_htlc_id -= inbound_drop_count;
3728
3729                 if let Some((_, update_state)) = self.context.pending_update_fee {
3730                         if update_state == FeeUpdateState::RemoteAnnounced {
3731                                 debug_assert!(!self.context.is_outbound());
3732                                 self.context.pending_update_fee = None;
3733                         }
3734                 }
3735
3736                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3737                         if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3738                                 // They sent us an update to remove this but haven't yet sent the corresponding
3739                                 // commitment_signed, we need to move it back to Committed and they can re-send
3740                                 // the update upon reconnection.
3741                                 htlc.state = OutboundHTLCState::Committed;
3742                         }
3743                 }
3744
3745                 self.context.sent_message_awaiting_response = None;
3746
3747                 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3748                 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
3749         }
3750
3751         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3752         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3753         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3754         /// update completes (potentially immediately).
3755         /// The messages which were generated with the monitor update must *not* have been sent to the
3756         /// remote end, and must instead have been dropped. They will be regenerated when
3757         /// [`Self::monitor_updating_restored`] is called.
3758         ///
3759         /// [`ChannelManager`]: super::channelmanager::ChannelManager
3760         /// [`chain::Watch`]: crate::chain::Watch
3761         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
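        /// Illustrative call pattern (an assumption about typical usage, matching the call
        /// sites in this file): `monitor_updating_paused(true, true, false, ..)` records that
        /// both a revoke_and_ack and a commitment_signed must be regenerated once the monitor
        /// write completes.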
3762         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3763                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3764                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3765                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3766         ) {
3767                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3768                 self.context.monitor_pending_commitment_signed |= resend_commitment;
3769                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3770                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3771                 self.context.monitor_pending_failures.append(&mut pending_fails);
3772                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3773                 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3774         }
3775
3776         /// Indicates that the latest ChannelMonitor update has been committed by the client
3777         /// successfully and we should restore normal operation. Returns messages which should be sent
3778         /// to the remote side.
3779         pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3780                 &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
3781                 user_config: &UserConfig, best_block_height: u32
3782         ) -> MonitorRestoreUpdates
3783         where
3784                 L::Target: Logger,
3785                 NS::Target: NodeSigner
3786         {
3787                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3788                 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3789                 let mut found_blocked = false;
3790                 self.context.pending_monitor_updates.retain(|upd| {
3791                         if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
3792                         if upd.blocked { found_blocked = true; }
3793                         upd.blocked
3794                 });
3795
3796                 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3797                 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3798                 // first received the funding_signed.
3799                 let mut funding_broadcastable =
3800                         if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
3801                                 self.context.funding_transaction.take()
3802                         } else { None };
3803                 // That said, if the funding transaction is already confirmed (i.e. we're active with a
3804                 // minimum_depth over 0), don't bother re-broadcasting the confirmed funding tx.
3805                 if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3806                         funding_broadcastable = None;
3807                 }
3808
3809                 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3810                 // (and we assume the user never directly broadcasts the funding transaction and waits for
3811                 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3812                 // * an inbound channel that failed to persist the monitor on funding_created and we got
3813                 //   the funding transaction confirmed before the monitor was persisted, or
3814                 // * a 0-conf channel that intended to send the channel_ready before any broadcast at all.
3815                 let channel_ready = if self.context.monitor_pending_channel_ready {
3816                         assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3817                                 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3818                         self.context.monitor_pending_channel_ready = false;
3819                         let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3820                         Some(msgs::ChannelReady {
3821                                 channel_id: self.context.channel_id(),
3822                                 next_per_commitment_point,
3823                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3824                         })
3825                 } else { None };
3826
3827                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
3828
3829                 let mut accepted_htlcs = Vec::new();
3830                 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3831                 let mut failed_htlcs = Vec::new();
3832                 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3833                 let mut finalized_claimed_htlcs = Vec::new();
3834                 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
3835
3836                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3837                         self.context.monitor_pending_revoke_and_ack = false;
3838                         self.context.monitor_pending_commitment_signed = false;
3839                         return MonitorRestoreUpdates {
3840                                 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3841                                 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3842                         };
3843                 }
3844
3845                 let raa = if self.context.monitor_pending_revoke_and_ack {
3846                         Some(self.get_last_revoke_and_ack())
3847                 } else { None };
3848                 let commitment_update = if self.context.monitor_pending_commitment_signed {
3849                         self.mark_awaiting_response();
3850                         Some(self.get_last_commitment_update(logger))
3851                 } else { None };
3852
3853                 self.context.monitor_pending_revoke_and_ack = false;
3854                 self.context.monitor_pending_commitment_signed = false;
3855                 let order = self.context.resend_order.clone();
3856                 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
3857                         log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
3858                         if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
3859                         match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
3860                 MonitorRestoreUpdates {
3861                         raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3862                 }
3863         }
3864
3865         pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
3866                 where F::Target: FeeEstimator, L::Target: Logger
3867         {
3868                 if self.context.is_outbound() {
3869                         return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
3870                 }
3871                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3872                         return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
3873                 }
3874                 Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
3875                 let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
3876
3877                 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
3878                 self.context.update_time_counter += 1;
3879                 // If the feerate has increased over the previous dust buffer (note that
3880                 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
3881                 // won't be pushed over our dust exposure limit by the feerate increase.
3882                 if feerate_over_dust_buffer {
3883                         let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3884                         let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3885                         let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3886                         let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3887                         if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
3888                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
3889                                         msg.feerate_per_kw, holder_tx_dust_exposure)));
3890                         }
3891                         if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
3892                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
3893                                         msg.feerate_per_kw, counterparty_tx_dust_exposure)));
3894                         }
3895                 }
3896                 Ok(())
3897         }
3898
3899         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
3900                 let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3901                 let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
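                // Sketch of the down-counting index scheme (as used here): with the next point
                // handed out at `cur`, the current commitment sits at `cur + 1`, so `cur + 2`
                // is the state we most recently revoked - exactly the secret the peer is owed.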
3902                 msgs::RevokeAndACK {
3903                         channel_id: self.context.channel_id,
3904                         per_commitment_secret,
3905                         next_per_commitment_point,
3906                         #[cfg(taproot)]
3907                         next_local_nonce: None,
3908                 }
3909         }
3910
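        /// Regenerates the latest `CommitmentUpdate` from the current HTLC state. A sketch of
        /// the mapping performed below: outbound HTLCs still in `LocalAnnounced` become
        /// `update_add_htlc`s, inbound HTLCs in `LocalRemoved` become the matching
        /// fulfill/fail/fail_malformed messages, and all are re-sent alongside a regenerated
        /// `commitment_signed`.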
3911         fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
3912                 let mut update_add_htlcs = Vec::new();
3913                 let mut update_fulfill_htlcs = Vec::new();
3914                 let mut update_fail_htlcs = Vec::new();
3915                 let mut update_fail_malformed_htlcs = Vec::new();
3916
3917                 for htlc in self.context.pending_outbound_htlcs.iter() {
3918                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
3919                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
3920                                         channel_id: self.context.channel_id(),
3921                                         htlc_id: htlc.htlc_id,
3922                                         amount_msat: htlc.amount_msat,
3923                                         payment_hash: htlc.payment_hash,
3924                                         cltv_expiry: htlc.cltv_expiry,
3925                                         onion_routing_packet: (**onion_packet).clone(),
3926                                 });
3927                         }
3928                 }
3929
3930                 for htlc in self.context.pending_inbound_htlcs.iter() {
3931                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3932                                 match reason {
3933                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
3934                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
3935                                                         channel_id: self.context.channel_id(),
3936                                                         htlc_id: htlc.htlc_id,
3937                                                         reason: err_packet.clone()
3938                                                 });
3939                                         },
3940                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
3941                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
3942                                                         channel_id: self.context.channel_id(),
3943                                                         htlc_id: htlc.htlc_id,
3944                                                         sha256_of_onion: sha256_of_onion.clone(),
3945                                                         failure_code: failure_code.clone(),
3946                                                 });
3947                                         },
3948                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
3949                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
3950                                                         channel_id: self.context.channel_id(),
3951                                                         htlc_id: htlc.htlc_id,
3952                                                         payment_preimage: payment_preimage.clone(),
3953                                                 });
3954                                         },
3955                                 }
3956                         }
3957                 }
3958
3959                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
3960                         Some(msgs::UpdateFee {
3961                                 channel_id: self.context.channel_id(),
3962                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
3963                         })
3964                 } else { None };
3965
3966                 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
3967                                 log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
3968                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
3969                 msgs::CommitmentUpdate {
3970                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
3971                         commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
3972                 }
3973         }
3974
3975         /// May panic if calls other than message-handling calls (which will all `Err` immediately)
3976         /// have been made between [`Self::remove_uncommitted_htlcs_and_mark_paused`] and this call.
3977         ///
3978         /// Some links printed in log lines are included here to check them during build (when run with
3979         /// `cargo doc --document-private-items`):
3980         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
3981         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
3982         pub fn channel_reestablish<L: Deref, NS: Deref>(
3983                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
3984                 genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
3985         ) -> Result<ReestablishResponses, ChannelError>
3986         where
3987                 L::Target: Logger,
3988                 NS::Target: NodeSigner
3989         {
3990                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
3991                         // While BOLT 2 doesn't explicitly say we should error the channel here, a
3992                         // channel_reestablish outside of a reconnection almost certainly means we're going
3993                         // to end up out-of-sync in some way, so we just close instead of trying to recover.
3994                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
3995                 }
3996
3997                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
3998                         msg.next_local_commitment_number == 0 {
3999                         return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
4000                 }
4001
4002                 if msg.next_remote_commitment_number > 0 {
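                        // The peer proves how much of our state they've seen by echoing back the last
                        // per-commitment secret we revoked to them; we re-derive the matching
                        // per-commitment point and compare to catch a peer lying about our state.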
4003                         let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4004                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4005                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4006                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4007                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4008                         }
4009                         if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4010                                 macro_rules! log_and_panic {
4011                                         ($err_msg: expr) => {
4012                                                 log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
4013                                                 panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
4014                                         }
4015                                 }
4016                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4017                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4018                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4019                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4020                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4021                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4022                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4023                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4024                         }
4025                 }
4026
4027                 // Before we change the state of the channel, we check if the peer is sending a very old
4028                 // commitment transaction number; if so, we send a warning message.
4029                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4030                 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4031                         return Err(
4032                                 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4033                         );
4034                 }
4035
4036                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4037                 // remaining cases either succeed or ErrorMessage-fail).
4038                 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4039                 self.context.sent_message_awaiting_response = None;
4040
4041                 let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4042                         assert!(self.context.shutdown_scriptpubkey.is_some());
4043                         Some(msgs::Shutdown {
4044                                 channel_id: self.context.channel_id,
4045                                 scriptpubkey: self.get_closing_scriptpubkey(),
4046                         })
4047                 } else { None };
4048
4049                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
4050
4051                 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4052                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4053                         if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4054                                         self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4055                                 if msg.next_remote_commitment_number != 0 {
4056                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4057                                 }
4058                                 // Short circuit the whole handler as there is nothing we can resend to them
4059                                 return Ok(ReestablishResponses {
4060                                         channel_ready: None,
4061                                         raa: None, commitment_update: None,
4062                                         order: RAACommitmentOrder::CommitmentFirst,
4063                                         shutdown_msg, announcement_sigs,
4064                                 });
4065                         }
4066
4067                         // We have OurChannelReady set!
4068                         let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4069                         return Ok(ReestablishResponses {
4070                                 channel_ready: Some(msgs::ChannelReady {
4071                                         channel_id: self.context.channel_id(),
4072                                         next_per_commitment_point,
4073                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4074                                 }),
4075                                 raa: None, commitment_update: None,
4076                                 order: RAACommitmentOrder::CommitmentFirst,
4077                                 shutdown_msg, announcement_sigs,
4078                         });
4079                 }
4080
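                // The wire protocol counts commitment numbers up from zero while we track them
                // counting down from INITIAL_COMMITMENT_NUMBER, hence the conversions below. If the
                // peer's next_remote_commitment_number is one behind what we expect, our last
                // revoke_and_ack never arrived and needs to be re-sent.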
4081                 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4082                         // Remote isn't waiting on any RevokeAndACK from us!
4083                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4084                         None
4085                 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4086                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4087                                 self.context.monitor_pending_revoke_and_ack = true;
4088                                 None
4089                         } else {
4090                                 Some(self.get_last_revoke_and_ack())
4091                         }
4092                 } else {
4093                         return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4094                 };
4095
4096                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4097                 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4098                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4099                 // the corresponding revoke_and_ack back yet.
4100                 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4101                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4102                         self.mark_awaiting_response();
4103                 }
4104                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
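                // For example (in wire numbering): if we've sent a commitment_signed for the peer's
                // commitment N+1 but their revoke_and_ack hasn't arrived yet, the peer will next
                // expect commitment N+2 even though our counter still points at N+1, hence the
                // extra one.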
4105
4106                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4107                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4108                         let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4109                         Some(msgs::ChannelReady {
4110                                 channel_id: self.context.channel_id(),
4111                                 next_per_commitment_point,
4112                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4113                         })
4114                 } else { None };
4115
4116                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4117                         if required_revoke.is_some() {
4118                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
4119                         } else {
4120                                 log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
4121                         }
4122
4123                         Ok(ReestablishResponses {
4124                                 channel_ready, shutdown_msg, announcement_sigs,
4125                                 raa: required_revoke,
4126                                 commitment_update: None,
4127                                 order: self.context.resend_order.clone(),
4128                         })
4129                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4130                         if required_revoke.is_some() {
4131                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
4132                         } else {
4133                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
4134                         }
4135
4136                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4137                                 self.context.monitor_pending_commitment_signed = true;
4138                                 Ok(ReestablishResponses {
4139                                         channel_ready, shutdown_msg, announcement_sigs,
4140                                         commitment_update: None, raa: None,
4141                                         order: self.context.resend_order.clone(),
4142                                 })
4143                         } else {
4144                                 Ok(ReestablishResponses {
4145                                         channel_ready, shutdown_msg, announcement_sigs,
4146                                         raa: required_revoke,
4147                                         commitment_update: Some(self.get_last_commitment_update(logger)),
4148                                         order: self.context.resend_order.clone(),
4149                                 })
4150                         }
4151                 } else {
4152                         Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4153                 }
4154         }
4155
4156         /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4157         /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4158         /// at which point they will be recalculated.
4159         fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4160                 -> (u64, u64)
4161                 where F::Target: FeeEstimator
4162         {
4163                 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4164
4165                 // Propose a range from our current Background feerate to our Normal feerate plus our
4166                 // force_close_avoidance_max_fee_satoshis.
4167                 // If we fail to come to consensus, we'll have to force-close.
4168                 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
4169                 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
4170                 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4171
4172                 // The spec requires that (when the channel does not have anchors) we only send absolute
4173                 // channel fees no greater than the absolute channel fee on the current commitment
4174                 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4175                 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4176                 // some force-closure by old nodes, but we wanted to close the channel anyway.
4177
4178                 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4179                         let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4180                         proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4181                         proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4182                 }
4183
4184                 // Note that technically we could end up with a lower minimum fee if one side's balance is
4185                 // below our dust limit, causing the output to disappear. We don't bother handling this
4186                 // case, however, as this should only happen if a channel is closed before any (material)
4187                 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4188                 // come to consensus with our counterparty on appropriate fees, however it should be a
4189                 // relatively rare case. We can revisit this later, though note that in order to determine
4190                 // if the funder's output is dust we have to know the absolute fee we're going to use.
4191                 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4192                 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
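                // As a rough illustration: at a 253 sat/kW feerate floor and a ~700-weight closing
                // transaction, the initial proposal would be 253 * 700 / 1000 = 177 sats.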
4193                 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4194                                 // We always add force_close_avoidance_max_fee_satoshis to our normal
4195                                 // feerate-calculated fee, but allow the max to be overridden if we're using a
4196                                 // target feerate-calculated fee.
4197                                 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4198                                         proposed_max_feerate as u64 * tx_weight / 1000)
4199                         } else {
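                                // If we're not the funder, the counterparty pays the closing fee, so the
                                // only meaningful cap is their entire balance: the channel value less our
                                // balance, rounded up to a whole satoshi.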
4200                                 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4201                         };
4202
4203                 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4204                 self.context.closing_fee_limits.clone().unwrap()
4205         }
4206
4207         /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4208         /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4209         /// this point if we're the funder we should send the initial closing_signed, and in any case
4210         /// shutdown should complete within a reasonable timeframe.
4211         fn closing_negotiation_ready(&self) -> bool {
4212                 self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
4213                         self.context.channel_state &
4214                                 (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
4215                                  ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
4216                                 == BOTH_SIDES_SHUTDOWN_MASK &&
4217                         self.context.pending_update_fee.is_none()
4218         }
4219
4220         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4221         /// an Err if no progress is being made and the channel should be force-closed instead.
4222         /// Should be called on a one-minute timer.
4223         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4224                 if self.closing_negotiation_ready() {
4225                         if self.context.closing_signed_in_flight {
4226                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4227                         } else {
4228                                 self.context.closing_signed_in_flight = true;
4229                         }
4230                 }
4231                 Ok(())
4232         }
4233
4234         pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4235                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4236                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4237                 where F::Target: FeeEstimator, L::Target: Logger
4238         {
4239                 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4240                         return Ok((None, None));
4241                 }
4242
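                // Per BOLT 2 the funder sends the first closing_signed, so if we're not the funder
                // the most we can do here is process a counterparty proposal we had to defer while a
                // monitor update was in flight.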
4243                 if !self.context.is_outbound() {
4244                         if let Some(msg) = self.context.pending_counterparty_closing_signed.take() {
4245                                 return self.closing_signed(fee_estimator, &msg);
4246                         }
4247                         return Ok((None, None));
4248                 }
4249
4250                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4251
4252                 assert!(self.context.shutdown_scriptpubkey.is_some());
4253                 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4254                 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4255                         our_min_fee, our_max_fee, total_fee_satoshis);
4256
4257                 let sig = self.context.holder_signer
4258                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4259                         .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4260
4261                 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4262                 Ok((Some(msgs::ClosingSigned {
4263                         channel_id: self.context.channel_id,
4264                         fee_satoshis: total_fee_satoshis,
4265                         signature: sig,
4266                         fee_range: Some(msgs::ClosingSignedFeeRange {
4267                                 min_fee_satoshis: our_min_fee,
4268                                 max_fee_satoshis: our_max_fee,
4269                         }),
4270                 }), None))
4271         }
4272
4273         // Marks a channel as waiting for a response from the counterparty. If one isn't received
4274         // within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] timer ticks of sending our own message,
4275         // we'll attempt a reconnection.
4276         fn mark_awaiting_response(&mut self) {
4277                 self.context.sent_message_awaiting_response = Some(0);
4278         }
4279
4280         /// Determines whether we should disconnect the counterparty due to not receiving a response
4281         /// within our expected timeframe.
4282         ///
4283         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4284         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4285                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4286                         ticks_elapsed
4287                 } else {
4288                         // Don't disconnect when we're not waiting on a response.
4289                         return false;
4290                 };
4291                 *ticks_elapsed += 1;
4292                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4293         }
4294
4295         pub fn shutdown<SP: Deref>(
4296                 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4297         ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4298         where SP::Target: SignerProvider
4299         {
4300                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4301                         return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4302                 }
4303                 if self.context.channel_state < ChannelState::FundingSent as u32 {
4304                         // The spec says we should fail the connection, not the channel, but that's nonsense:
4305                         // there are plenty of reasons you may want to fail a channel pre-funding, and the spec
4306                         // says you can do that via an error message without failing the connection anyway...
4307                         return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4308                 }
4309                 for htlc in self.context.pending_inbound_htlcs.iter() {
4310                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4311                                 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4312                         }
4313                 }
4314                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4315
4316                 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4317                         return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
4318                 }
4319
4320                 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4321                         if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4322                                 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
4323                         }
4324                 } else {
4325                         self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4326                 }
4327
4328                 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4329                 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4330                 // any further commitment updates after we set LocalShutdownSent.
4331                 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4332
4333                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4334                         Some(_) => false,
4335                         None => {
4336                                 assert!(send_shutdown);
4337                                 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4338                                         Ok(scriptpubkey) => scriptpubkey,
4339                                         Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4340                                 };
4341                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
4342                                         return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4343                                 }
4344                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4345                                 true
4346                         },
4347                 };
4348
4349                 // From here on out, we may not fail!
4350
4351                 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4352                 self.context.update_time_counter += 1;
4353
4354                 let monitor_update = if update_shutdown_script {
4355                         self.context.latest_monitor_update_id += 1;
4356                         let monitor_update = ChannelMonitorUpdate {
4357                                 update_id: self.context.latest_monitor_update_id,
4358                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4359                                         scriptpubkey: self.get_closing_scriptpubkey(),
4360                                 }],
4361                         };
4362                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
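                        // Note: this effectively inlines push_ret_blockable_mon_update, which keeps the
                        // returned borrow limited to the pending_monitor_updates field so the remaining
                        // context fields can still be mutated below.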
4363                         if self.push_blockable_mon_update(monitor_update) {
4364                                 self.context.pending_monitor_updates.last().map(|upd| &upd.update)
4365                         } else { None }
4366                 } else { None };
4367                 let shutdown = if send_shutdown {
4368                         Some(msgs::Shutdown {
4369                                 channel_id: self.context.channel_id,
4370                                 scriptpubkey: self.get_closing_scriptpubkey(),
4371                         })
4372                 } else { None };
4373
4374                 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4375                 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4376                 // cell HTLCs and return them to fail the payment.
4377                 self.context.holding_cell_update_fee = None;
4378                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4379                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4380                         match htlc_update {
4381                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4382                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4383                                         false
4384                                 },
4385                                 _ => true
4386                         }
4387                 });
4388
4389                 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4390                 self.context.update_time_counter += 1;
4391
4392                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4393         }
4394
4395         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4396                 let mut tx = closing_tx.trust().built_transaction().clone();
4397
4398                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4399
4400                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4401                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4402                 let mut holder_sig = sig.serialize_der().to_vec();
4403                 holder_sig.push(EcdsaSighashType::All as u8);
4404                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4405                 cp_sig.push(EcdsaSighashType::All as u8);
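                // Per BOLT 3 the funding redeemscript orders the two pubkeys lexicographically, and
                // OP_CHECKMULTISIG requires the signatures in the same order as their pubkeys (after
                // the extra empty element pushed above to absorb CHECKMULTISIG's off-by-one pop).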
4406                 if funding_key[..] < counterparty_funding_key[..] {
4407                         tx.input[0].witness.push(holder_sig);
4408                         tx.input[0].witness.push(cp_sig);
4409                 } else {
4410                         tx.input[0].witness.push(cp_sig);
4411                         tx.input[0].witness.push(holder_sig);
4412                 }
4413
4414                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4415                 tx
4416         }
4417
4418         pub fn closing_signed<F: Deref>(
4419                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4420                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4421                 where F::Target: FeeEstimator
4422         {
4423                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4424                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4425                 }
4426                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4427                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4428                 }
4429                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4430                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4431                 }
4432                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4433                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4434                 }
4435
4436                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4437                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4438                 }
4439
4440                 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4441                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
4442                         return Ok((None, None));
4443                 }
4444
4445                 let funding_redeemscript = self.context.get_funding_redeemscript();
4446                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4447                 if used_total_fee != msg.fee_satoshis {
4448                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4449                 }
4450                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4451
4452                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4453                         Ok(_) => {},
4454                         Err(_e) => {
4455                                 // The remote end may have decided to revoke their output due to inconsistent dust
4456                                 // limits, so check for that case by re-checking the signature here.
4457                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4458                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4459                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4460                         },
4461                 };
4462
4463                 for outp in closing_tx.trust().built_transaction().output.iter() {
4464                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4465                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4466                         }
4467                 }
4468
4469                 assert!(self.context.shutdown_scriptpubkey.is_some());
4470                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4471                         if last_fee == msg.fee_satoshis {
4472                                 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4473                                 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4474                                 self.context.update_time_counter += 1;
4475                                 return Ok((None, Some(tx)));
4476                         }
4477                 }
4478
4479                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4480
4481                 macro_rules! propose_fee {
4482                         ($new_fee: expr) => {
4483                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4484                                         (closing_tx, $new_fee)
4485                                 } else {
4486                                         self.build_closing_transaction($new_fee, false)
4487                                 };
4488
4489                                 let sig = self.context.holder_signer
4490                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4491                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4492
4493                                 let signed_tx = if $new_fee == msg.fee_satoshis {
4494                                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
4495                                         self.context.update_time_counter += 1;
4496                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4497                                         Some(tx)
4498                                 } else { None };
4499
4500                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4501                                 return Ok((Some(msgs::ClosingSigned {
4502                                         channel_id: self.context.channel_id,
4503                                         fee_satoshis: used_fee,
4504                                         signature: sig,
4505                                         fee_range: Some(msgs::ClosingSignedFeeRange {
4506                                                 min_fee_satoshis: our_min_fee,
4507                                                 max_fee_satoshis: our_max_fee,
4508                                         }),
4509                                 }), signed_tx))
4510                         }
4511                 }
4512
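                // If the peer sent a fee_range we can use the modern "quick close" flow and settle
                // immediately on any fee inside the overlap of the two ranges; otherwise we fall
                // back to the legacy alternating-proposal negotiation below.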
4513                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4514                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4515                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4516                         }
4517                         if max_fee_satoshis < our_min_fee {
4518                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4519                         }
4520                         if min_fee_satoshis > our_max_fee {
4521                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4522                         }
4523
4524                         if !self.context.is_outbound() {
4525                                 // They have to pay, so pick the highest fee in the overlapping range.
4526                                 // We should never set an upper bound aside from their full balance
4527                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4528                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4529                         } else {
4530                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4531                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4532                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
4533                                 }
4534                                 // The proposed fee is in our acceptable range, accept it and broadcast!
4535                                 propose_fee!(msg.fee_satoshis);
4536                         }
4537                 } else {
4538                         // Old-style fee negotiation. We don't bother to enforce whether they are complying
4539                         // with the "making progress" requirements; we just comply and hope for the best.
4540                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4541                                 if msg.fee_satoshis > last_fee {
4542                                         if msg.fee_satoshis < our_max_fee {
4543                                                 propose_fee!(msg.fee_satoshis);
4544                                         } else if last_fee < our_max_fee {
4545                                                 propose_fee!(our_max_fee);
4546                                         } else {
4547                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4548                                         }
4549                                 } else {
4550                                         if msg.fee_satoshis > our_min_fee {
4551                                                 propose_fee!(msg.fee_satoshis);
4552                                         } else if last_fee > our_min_fee {
4553                                                 propose_fee!(our_min_fee);
4554                                         } else {
4555                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4556                                         }
4557                                 }
4558                         } else {
4559                                 if msg.fee_satoshis < our_min_fee {
4560                                         propose_fee!(our_min_fee);
4561                                 } else if msg.fee_satoshis > our_max_fee {
4562                                         propose_fee!(our_max_fee);
4563                                 } else {
4564                                         propose_fee!(msg.fee_satoshis);
4565                                 }
4566                         }
4567                 }
4568         }
4569
4570         fn internal_htlc_satisfies_config(
4571                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4572         ) -> Result<(), (&'static str, u16)> {
4573                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4574                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
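                // e.g. with forwarding_fee_base_msat = 1_000, forwarding_fee_proportional_millionths
                // = 100, and amt_to_forward = 100_000_000 msat, the required fee is
                // 100_000_000 * 100 / 1_000_000 + 1_000 = 11_000 msat, so the inbound HTLC must
                // carry at least 100_011_000 msat.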
4575                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4576                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4577                         return Err((
4578                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4579                                 0x1000 | 12, // fee_insufficient
4580                         ));
4581                 }
4582                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4583                         return Err((
4584                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4585                                 0x1000 | 13, // incorrect_cltv_expiry
4586                         ));
4587                 }
4588                 Ok(())
4589         }
4590
4591         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4592         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4593         /// unsuccessful, falls back to the previous one if one exists.
4594         pub fn htlc_satisfies_config(
4595                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4596         ) -> Result<(), (&'static str, u16)> {
4597                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4598                         .or_else(|err| {
4599                                 if let Some(prev_config) = self.context.prev_config() {
4600                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4601                                 } else {
4602                                         Err(err)
4603                                 }
4604                         })
4605         }
4606
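        // Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER; the internal
        // counters track the *next* commitment on each side, so the getters below step back by
        // one (or two, for the last-revoked counterparty commitment) to report current state.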
4607         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4608                 self.context.cur_holder_commitment_transaction_number + 1
4609         }
4610
4611         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4612                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4613         }
4614
4615         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4616                 self.context.cur_counterparty_commitment_transaction_number + 2
4617         }
4618
4619         #[cfg(test)]
4620         pub fn get_signer(&self) -> &Signer {
4621                 &self.context.holder_signer
4622         }
4623
4624         #[cfg(test)]
4625         pub fn get_value_stat(&self) -> ChannelValueStat {
4626                 ChannelValueStat {
4627                         value_to_self_msat: self.context.value_to_self_msat,
4628                         channel_value_msat: self.context.channel_value_satoshis * 1000,
4629                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4630                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|h| h.amount_msat).sum::<u64>(),
4631                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|h| h.amount_msat).sum::<u64>(),
4632                         holding_cell_outbound_amount_msat: {
4633                                 let mut res = 0;
4634                                 for h in self.context.holding_cell_htlc_updates.iter() {
4635                                         match h {
4636                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4637                                                         res += amount_msat;
4638                                                 }
4639                                                 _ => {}
4640                                         }
4641                                 }
4642                                 res
4643                         },
4644                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4645                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4646                 }
4647         }
4648
4649         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4650         /// Allowed in any state (including after shutdown)
4651         pub fn is_awaiting_monitor_update(&self) -> bool {
4652                 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4653         }
4654
4655         pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
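                // Everything before the first still-pending update has completed, so the latest
                // complete update_id is one less than the oldest pending one.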
4656                 if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4657                 self.context.pending_monitor_updates[0].update.update_id - 1
4658         }
4659
4660         /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
4661         /// further blocked monitor update exists after it.
4662         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
4663                 for i in 0..self.context.pending_monitor_updates.len() {
4664                         if self.context.pending_monitor_updates[i].blocked {
4665                                 self.context.pending_monitor_updates[i].blocked = false;
4666                                 return Some((&self.context.pending_monitor_updates[i].update,
4667                                         self.context.pending_monitor_updates.len() > i + 1));
4668                         }
4669                 }
4670                 None
4671         }
4672
4673         /// Pushes a new monitor update into our monitor update queue, returning whether it should be
4674         /// immediately given to the user for persisting or if it should be held as blocked.
4675         fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
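                // A new update may only be released immediately if nothing already queued is still
                // blocked; otherwise it must queue behind the blocked updates so that monitor
                // updates are always given to the user in order.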
4676                 let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
4677                 self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
4678                         update, blocked: !release_monitor
4679                 });
4680                 release_monitor
4681         }
4682
4683         /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
4684         /// it should be immediately given to the user for persisting or `None` if it should be held as
4685         /// blocked.
4686         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4687         -> Option<&ChannelMonitorUpdate> {
4688                 let release_monitor = self.push_blockable_mon_update(update);
4689                 if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
4690         }
4691
4692         pub fn no_monitor_updates_pending(&self) -> bool {
4693                 self.context.pending_monitor_updates.is_empty()
4694         }
4695
4696         pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
4697                 self.context.pending_monitor_updates.retain(|upd| {
4698                         if upd.update.update_id <= update_id {
4699                                 assert!(!upd.blocked, "Completed update must have flown");
4700                                 false
4701                         } else { true }
4702                 });
4703         }
4704
4705         pub fn complete_one_mon_update(&mut self, update_id: u64) {
4706                 self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
4707         }
4708
4709         /// Returns an iterator over all unblocked monitor updates which have not yet completed.
4710         pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
4711                 self.context.pending_monitor_updates.iter()
4712                         .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
4713         }
4714
4715         /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4716         /// If the channel is outbound, this implies we have not yet broadcasted the funding
4717         /// transaction. If the channel is inbound, this implies simply that the channel has not
4718         /// advanced state.
4719         pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4720                 if !self.is_awaiting_monitor_update() { return false; }
4721                 if self.context.channel_state &
4722                         !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
4723                                 == ChannelState::FundingSent as u32 {
4724                         // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4725                         // FundingSent set, though our peer could have sent their channel_ready.
4726                         debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4727                         return true;
4728                 }
4729                 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4730                         self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4731                         // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4732                         // waiting for the initial monitor persistence. Thus, we check if our commitment
4733                         // transaction numbers have both been iterated only exactly once (for the
4734                         // funding_signed), and we're awaiting monitor update.
4735                         //
4736                         // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4737                         // only way to get an awaiting-monitor-update state during initial funding is if the
4738                         // initial monitor persistence is still pending).
4739                         //
4740                         // Because deciding we're awaiting initial broadcast spuriously could result in
4741                         // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4742                         // we hard-assert here, even in production builds.
4743                         if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4744                         assert!(self.context.monitor_pending_channel_ready);
4745                         assert_eq!(self.context.latest_monitor_update_id, 0);
4746                         return true;
4747                 }
4748                 false
4749         }
4750
4751         /// Returns true if our channel_ready has been sent
4752         pub fn is_our_channel_ready(&self) -> bool {
4753                 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
4754         }
4755
4756         /// Returns true if our peer has either initiated or agreed to shut down the channel.
4757         pub fn received_shutdown(&self) -> bool {
4758                 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4759         }
4760
4761         /// Returns true if we either initiated or agreed to shut down the channel.
4762         pub fn sent_shutdown(&self) -> bool {
4763                 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4764         }
4765
4766         /// Returns true if this channel is fully shut down. True here implies that no further actions
4767         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4768         /// will be handled appropriately by the chain monitor.
4769         pub fn is_shutdown(&self) -> bool {
4770                 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
4771                         assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4772                         true
4773                 } else { false }
4774         }
4775
4776         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4777                 self.context.channel_update_status
4778         }
4779
4780         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4781                 self.context.update_time_counter += 1;
4782                 self.context.channel_update_status = status;
4783         }
4784
4785         fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
4786                 // Called:
4787                 //  * always when a new block/transactions are confirmed with the new height
4788                 //  * when funding is signed with a height of 0
4789                 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
4790                         return None;
4791                 }
4792
4793                 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4794                 if funding_tx_confirmations <= 0 {
4795                         self.context.funding_tx_confirmation_height = 0;
4796                 }
4797
4798                 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
4799                         return None;
4800                 }
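                // Illustrative arithmetic for the check above: a funding transaction confirmed at
                // height 100 has 102 - 100 + 1 = 3 confirmations at height 102, so a channel with
                // minimum_depth = 3 first passes this check at height 102.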
4801
4802                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
4803                 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
4804                         self.context.channel_state |= ChannelState::OurChannelReady as u32;
4805                         true
4806                 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
4807                         self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
4808                         self.context.update_time_counter += 1;
4809                         true
4810                 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
4811                         // We got a reorg but not enough to trigger a force close, just ignore.
4812                         false
4813                 } else {
4814                         if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
4815                                 // We should never see a funding transaction on-chain until we've received
4816                                 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
4817                                 // an inbound channel - before that we have no known funding TXID). The fuzzer,
4818                                 // however, may do this and we shouldn't treat it as a bug.
4819                                 #[cfg(not(fuzzing))]
4820                                 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
4821                                         Do NOT broadcast a funding transaction manually - let LDK do it for you!",
4822                                         self.context.channel_state);
4823                         }
4824                         // We got a reorg but not enough to trigger a force close, just ignore.
4825                         false
4826                 };
4827
4828                 if need_commitment_update {
4829                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
4830                                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4831                                         let next_per_commitment_point =
4832                                                 self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
4833                                         return Some(msgs::ChannelReady {
4834                                                 channel_id: self.context.channel_id,
4835                                                 next_per_commitment_point,
4836                                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4837                                         });
4838                                 }
4839                         } else {
4840                                 self.context.monitor_pending_channel_ready = true;
4841                         }
4842                 }
4843                 None
4844         }
4845
4846         /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
4847         /// In the first case, we store the confirmation height and calculate the short channel id.
4848         /// In the second, we simply return an Err indicating we need to be force-closed now.
4849         pub fn transactions_confirmed<NS: Deref, L: Deref>(
4850                 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
4851                 genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
4852         ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4853         where
4854                 NS::Target: NodeSigner,
4855                 L::Target: Logger
4856         {
4857                 if let Some(funding_txo) = self.context.get_funding_txo() {
4858                         for &(index_in_block, tx) in txdata.iter() {
4859                                 // Check if the transaction is the expected funding transaction, and if it is,
4860                                 // check that it pays the right amount to the right script.
4861                                 if self.context.funding_tx_confirmation_height == 0 {
4862                                         if tx.txid() == funding_txo.txid {
4863                                                 let txo_idx = funding_txo.index as usize;
4864                                                 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
4865                                                                 tx.output[txo_idx].value != self.context.channel_value_satoshis {
4866                                                         if self.context.is_outbound() {
4867                                                                 // If we generated the funding transaction and it doesn't match what it
4868                                                                 // should, the client is really broken and we should just panic and
4869                                                                 // tell them off. That said, because hash collisions happen with high
4870                                                                 // probability in fuzzing mode, if we're fuzzing we just close the
4871                                                                 // channel and move on.
4872                                                                 #[cfg(not(fuzzing))]
4873                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
4874                                                         }
4875                                                         self.context.update_time_counter += 1;
4876                                                         let err_reason = "funding tx had wrong script/value or output index";
4877                                                         return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
4878                                                 } else {
4879                                                         if self.context.is_outbound() {
4880                                                                 for input in tx.input.iter() {
4881                                                                         if input.witness.is_empty() {
4882                                                                                 // We generated a malleable funding transaction, implying we've
4883                                                                                 // just exposed ourselves to funds loss to our counterparty.
4884                                                                                 #[cfg(not(fuzzing))]
4885                                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
4886                                                                         }
4887                                                                 }
4888                                                         }
4889                                                         self.context.funding_tx_confirmation_height = height;
4890                                                         self.context.funding_tx_confirmed_in = Some(*block_hash);
4891                                                         self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
4892                                                                 Ok(scid) => Some(scid),
4893                                                                 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
4894                                                         }
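                                                        // For reference, `scid_from_parts` packs the BOLT 7 short channel id as
                                                        // `(block_height << 40) | (tx_index_in_block << 16) | output_index`, e.g.
                                                        // height 700_000, tx index 3, output 1 => 0x0AAE60_000003_0001.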
4895                                                 }
4896                                         }
4897                                         // If we allow 1-conf funding, we may need to check for channel_ready here and
4898                                         // send it immediately instead of waiting for a best_block_updated call (which
4899                                         // may have already happened for this block).
4900                                         if let Some(channel_ready) = self.check_get_channel_ready(height) {
4901                                                 log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
4902                                                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
4903                                                 return Ok((Some(channel_ready), announcement_sigs));
4904                                         }
4905                                 }
4906                                 for inp in tx.input.iter() {
4907                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
4908                                                 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
4909                                                 return Err(ClosureReason::CommitmentTxConfirmed);
4910                                         }
4911                                 }
4912                         }
4913                 }
4914                 Ok((None, None))
4915         }
4916
4917         /// When a new block is connected, we check the height of the block against outbound holding
4918         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
4919         /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
4920         /// handled by the ChannelMonitor.
4921         ///
4922         /// If we return Err, the channel may have been closed, at which point the standard
4923         /// requirements apply - no calls may be made except those explicitly stated to be allowed
4924         /// post-shutdown.
4925         ///
4926         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
4927         /// back.
4928         pub fn best_block_updated<NS: Deref, L: Deref>(
4929                 &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
4930                 node_signer: &NS, user_config: &UserConfig, logger: &L
4931         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4932         where
4933                 NS::Target: NodeSigner,
4934                 L::Target: Logger
4935         {
4936                 self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
4937         }
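        // Illustrative caller flow (hypothetical handler code, not part of this API):
        //   match chan.best_block_updated(height, header_time, genesis_hash, &node_signer, &config, &logger) {
        //       Ok((ready_opt, timed_out_htlcs, ann_sigs_opt)) => {
        //           // queue ready_opt/ann_sigs_opt for the peer and fail timed_out_htlcs backwards
        //       },
        //       Err(reason) => { /* force-close the channel, surfacing `reason` */ },
        //   }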
4938
4939         fn do_best_block_updated<NS: Deref, L: Deref>(
4940                 &mut self, height: u32, highest_header_time: u32,
4941                 genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
4942         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4943         where
4944                 NS::Target: NodeSigner,
4945                 L::Target: Logger
4946         {
4947                 let mut timed_out_htlcs = Vec::new();
4948                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
4949                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
4950                 // ~now.
4951                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
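                // E.g. if LATENCY_GRACE_PERIOD_BLOCKS is 6 and the new best height is 800_000, any
                // held AddHTLC with cltv_expiry <= 800_006 is dropped and failed backwards below.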
4952                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4953                         match htlc_update {
4954                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
4955                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
4956                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
4957                                                 false
4958                                         } else { true }
4959                                 },
4960                                 _ => true
4961                         }
4962                 });
4963
4964                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
4965
4966                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
4967                         let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
4968                                 self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
4969                         } else { None };
4970                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
4971                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
4972                 }
4973
4974                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
4975                 if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
4976                    (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
4977                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4978                         if self.context.funding_tx_confirmation_height == 0 {
4979                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
4980                                 // zero if it has been reorged out, however in either case, our state flags
4981                                 // indicate we've already sent a channel_ready
4982                                 funding_tx_confirmations = 0;
4983                         }
4984
4985                         // If we've sent channel_ready (or have both sent and received channel_ready), and
4986                         // the funding transaction has become unconfirmed,
4987                         // close the channel and hope we can get the latest state on chain (because presumably
4988                         // the funding transaction is at least still in the mempool of most nodes).
4989                         //
4990                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
4991                         // 0-conf channel, but not doing so may lead to the
4992                         // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
4993                         // to.
4994                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
4995                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
4996                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
4997                                 return Err(ClosureReason::ProcessingError { err: err_reason });
4998                         }
4999                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5000                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5001                         log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
5002                         // If funding_tx_confirmed_in is unset, the channel must not be active
5003                         assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
5004                         assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5005                         return Err(ClosureReason::FundingTimedOut);
5006                 }
5007
5008                 let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
5009                         self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
5010                 } else { None };
5011                 Ok((None, timed_out_htlcs, announcement_sigs))
5012         }
5013
5014         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5015         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5016         /// before the channel has reached channel_ready and we can just wait for more blocks.
5017         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5018                 if self.context.funding_tx_confirmation_height != 0 {
5019                         // We handle the funding disconnection by calling best_block_updated with a height one
5020                         // below where our funding was connected, implying a reorg back to conf_height - 1.
5021                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
5022                         // We use the time field to bump the current time we set on channel updates if it's
5023                         // larger. If we don't know that time has moved forward, we can just set it to the last
5024                         // time we saw and it will be ignored.
5025                         let best_time = self.context.update_time_counter;
5026                         match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
5027                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5028                                         assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
5029                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5030                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5031                                         Ok(())
5032                                 },
5033                                 Err(e) => Err(e)
5034                         }
5035                 } else {
5036                         // We never learned about the funding confirmation anyway, just ignore
5037                         Ok(())
5038                 }
5039         }
5040
5041         // Methods to get unprompted messages to send to the remote end (or where we already returned
5042         // something in the handler for the message that prompted this message):
5043
5044         pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
5045                 if !self.context.is_outbound() {
5046                         panic!("Tried to open a channel for an inbound channel?");
5047                 }
5048                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
5049                         panic!("Cannot generate an open_channel after we've moved forward");
5050                 }
5051
5052                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5053                         panic!("Tried to send an open_channel for a channel that has already advanced");
5054                 }
5055
5056                 let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5057                 let keys = self.context.get_holder_pubkeys();
5058
5059                 msgs::OpenChannel {
5060                         chain_hash,
5061                         temporary_channel_id: self.context.channel_id,
5062                         funding_satoshis: self.context.channel_value_satoshis,
5063                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
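                        // E.g. funding a 1_000_000 sat channel while keeping 999_000_000 msat for ourselves
                        // pushes the remaining 1_000_000 msat to the counterparty.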
5064                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
5065                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
5066                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
5067                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
5068                         feerate_per_kw: self.context.feerate_per_kw as u32,
5069                         to_self_delay: self.context.get_holder_selected_contest_delay(),
5070                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
5071                         funding_pubkey: keys.funding_pubkey,
5072                         revocation_basepoint: keys.revocation_basepoint,
5073                         payment_point: keys.payment_point,
5074                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
5075                         htlc_basepoint: keys.htlc_basepoint,
5076                         first_per_commitment_point,
5077                         channel_flags: if self.context.config.announced_channel {1} else {0},
5078                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
5079                                 Some(script) => script.clone().into_inner(),
5080                                 None => Builder::new().into_script(),
5081                         }),
5082                         channel_type: Some(self.context.channel_type.clone()),
5083                 }
5084         }
5085
5086         pub fn inbound_is_awaiting_accept(&self) -> bool {
5087                 self.context.inbound_awaiting_accept
5088         }
5089
5090         /// Sets this channel to accepting 0conf; must be done before we generate our accept_channel message.
5091         pub fn set_0conf(&mut self) {
5092                 assert!(self.context.inbound_awaiting_accept);
5093                 self.context.minimum_depth = Some(0);
5094         }
5095
5096         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
5097         /// should be sent back to the counterparty node.
5098         ///
5099         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5100         pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
5101                 if self.context.is_outbound() {
5102                         panic!("Tried to send accept_channel for an outbound channel?");
5103                 }
5104                 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
5105                         panic!("Tried to send accept_channel after channel had moved forward");
5106                 }
5107                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5108                         panic!("Tried to send an accept_channel for a channel that has already advanced");
5109                 }
5110                 if !self.context.inbound_awaiting_accept {
5111                         panic!("The inbound channel has already been accepted");
5112                 }
5113
5114                 self.context.user_id = user_id;
5115                 self.context.inbound_awaiting_accept = false;
5116
5117                 self.generate_accept_channel_message()
5118         }
5119
5120         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
5121         /// inbound channel. If the intention is to accept an inbound channel, use
5122         /// [`Channel::accept_inbound_channel`] instead.
5123         ///
5124         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5125         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
5126                 let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5127                 let keys = self.context.get_holder_pubkeys();
5128
5129                 msgs::AcceptChannel {
5130                         temporary_channel_id: self.context.channel_id,
5131                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
5132                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
5133                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
5134                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
5135                         minimum_depth: self.context.minimum_depth.unwrap(),
5136                         to_self_delay: self.context.get_holder_selected_contest_delay(),
5137                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
5138                         funding_pubkey: keys.funding_pubkey,
5139                         revocation_basepoint: keys.revocation_basepoint,
5140                         payment_point: keys.payment_point,
5141                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
5142                         htlc_basepoint: keys.htlc_basepoint,
5143                         first_per_commitment_point,
5144                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
5145                                 Some(script) => script.clone().into_inner(),
5146                                 None => Builder::new().into_script(),
5147                         }),
5148                         channel_type: Some(self.context.channel_type.clone()),
5149                         #[cfg(taproot)]
5150                         next_local_nonce: None,
5151                 }
5152         }
5153
5154         /// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound channel
5155         /// without accepting it.
5156         ///
5157         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5158         #[cfg(test)]
5159         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
5160                 self.generate_accept_channel_message()
5161         }
5162
5163         /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
5164         fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
5165                 let counterparty_keys = self.context.build_remote_transaction_keys();
5166                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5167                 Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
5168                                 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5169         }
5170
5171         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
5172         /// a funding_created message for the remote peer.
5173         /// Panics if called at some time other than immediately after initial handshake, if called twice,
5174         /// or if called on an inbound channel.
5175         /// Note that channel_id changes during this call!
5176         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
5177         /// If an Err is returned, it is a ChannelError::Close.
5178         pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
5179                 if !self.context.is_outbound() {
5180                         panic!("Tried to create outbound funding_created message on an inbound channel!");
5181                 }
5182                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
5183                         panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
5184                 }
5185                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
5186                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
5187                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5188                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
5189                 }
5190
5191                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
5192                 self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
5193
5194                 let signature = match self.get_outbound_funding_created_signature(logger) {
5195                         Ok(res) => res,
5196                         Err(e) => {
5197                                 log_error!(logger, "Got bad signatures: {:?}!", e);
5198                                 self.context.channel_transaction_parameters.funding_outpoint = None;
5199                                 return Err(e);
5200                         }
5201                 };
5202
5203                 let temporary_channel_id = self.context.channel_id;
5204
5205                 // Now that we're past error-generating stuff, update our local state:
5206
5207                 self.context.channel_state = ChannelState::FundingCreated as u32;
5208                 self.context.channel_id = funding_txo.to_channel_id();
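                // Per BOLT 2, `to_channel_id()` derives the real channel_id by XORing the big-endian
                // funding output index into the last two bytes of the funding txid.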
5209                 self.context.funding_transaction = Some(funding_transaction);
5210
5211                 Ok(msgs::FundingCreated {
5212                         temporary_channel_id,
5213                         funding_txid: funding_txo.txid,
5214                         funding_output_index: funding_txo.index,
5215                         signature,
5216                         #[cfg(taproot)]
5217                         partial_signature_with_nonce: None,
5218                         #[cfg(taproot)]
5219                         next_local_nonce: None,
5220                 })
5221         }
5222
5223         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5224         /// announceable and available for use (have exchanged ChannelReady messages in both
5225         /// directions). Should be used for both broadcasted announcements and in response to an
5226         /// AnnouncementSignatures message from the remote peer.
5227         ///
5228         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5229         /// closing).
5230         ///
5231         /// This will only return ChannelError::Ignore upon failure.
5232         fn get_channel_announcement<NS: Deref>(
5233                 &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
5234         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5235                 if !self.context.config.announced_channel {
5236                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5237                 }
5238                 if !self.context.is_usable() {
5239                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5240                 }
5241
5242                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5243                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5244                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5245                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
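                // BOLT 7 requires `node_id_1` to be the lexicographically-lesser of the two node ids,
                // so whichever side sorts first fills the *_1 fields below.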
5246
5247                 let msg = msgs::UnsignedChannelAnnouncement {
5248                         features: channelmanager::provided_channel_features(&user_config),
5249                         chain_hash,
5250                         short_channel_id: self.context.get_short_channel_id().unwrap(),
5251                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5252                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5253                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5254                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5255                         excess_data: Vec::new(),
5256                 };
5257
5258                 Ok(msg)
5259         }
5260
5261         fn get_announcement_sigs<NS: Deref, L: Deref>(
5262                 &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
5263                 best_block_height: u32, logger: &L
5264         ) -> Option<msgs::AnnouncementSignatures>
5265         where
5266                 NS::Target: NodeSigner,
5267                 L::Target: Logger
5268         {
5269                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5270                         return None;
5271                 }
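                // I.e. six confirmations: a funding tx confirmed at height 100 counts as its first
                // confirmation, so we only consider announcing once the best height reaches 105.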
5272
5273                 if !self.context.is_usable() {
5274                         return None;
5275                 }
5276
5277                 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5278                         log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5279                         return None;
5280                 }
5281
5282                 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5283                         return None;
5284                 }
5285
5286                 log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
5287                 let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
5288                         Ok(a) => a,
5289                         Err(e) => {
5290                                 log_trace!(logger, "{:?}", e);
5291                                 return None;
5292                         }
5293                 };
5294                 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5295                         Err(_) => {
5296                                 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5297                                 return None;
5298                         },
5299                         Ok(v) => v
5300                 };
5301                 let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5302                         Err(_) => {
5303                                 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5304                                 return None;
5305                         },
5306                         Ok(v) => v
5307                 };
5308                 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5309
5310                 Some(msgs::AnnouncementSignatures {
5311                         channel_id: self.context.channel_id(),
5312                         short_channel_id: self.context.get_short_channel_id().unwrap(),
5313                         node_signature: our_node_sig,
5314                         bitcoin_signature: our_bitcoin_sig,
5315                 })
5316         }
5317
5318         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5319         /// available.
5320         fn sign_channel_announcement<NS: Deref>(
5321                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5322         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5323                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5324                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5325                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5326                         let were_node_one = announcement.node_id_1 == our_node_key;
5327
5328                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5329                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5330                         let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5331                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5332                         Ok(msgs::ChannelAnnouncement {
5333                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5334                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5335                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5336                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5337                                 contents: announcement,
5338                         })
5339                 } else {
5340                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5341                 }
5342         }
5343
5344         /// Processes an incoming announcement_signatures message, providing a fully-signed
5345         /// channel_announcement message which we can broadcast and storing our counterparty's
5346         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5347         pub fn announcement_signatures<NS: Deref>(
5348                 &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
5349                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5350         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5351                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5352
5353                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5354
5355                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5356                         return Err(ChannelError::Close(format!(
5357                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5358                                  &announcement, self.context.get_counterparty_node_id())));
5359                 }
5360                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5361                         return Err(ChannelError::Close(format!(
5362                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5363                                 &announcement, self.context.counterparty_funding_pubkey())));
5364                 }
5365
5366                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5367                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5368                         return Err(ChannelError::Ignore(
5369                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5370                 }
5371
5372                 self.sign_channel_announcement(node_signer, announcement)
5373         }
5374
5375         /// Gets a signed channel_announcement for this channel, if we previously received an
5376         /// announcement_signatures from our counterparty.
5377         pub fn get_signed_channel_announcement<NS: Deref>(
5378                 &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
5379         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5380                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5381                         return None;
5382                 }
5383                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5384                         Ok(res) => res,
5385                         Err(_) => return None,
5386                 };
5387                 match self.sign_channel_announcement(node_signer, announcement) {
5388                         Ok(res) => Some(res),
5389                         Err(_) => None,
5390                 }
5391         }
5392
5393         /// May panic if called on a channel that wasn't immediately-previously
5394         /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5395         pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5396                 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5397                 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5398                 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5399                 // current to_remote balances. However, it no longer has any use, and thus is now simply
5400                 // set to a dummy (but valid, as required by the spec) public key.
5401                 // Fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5402                 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5403                 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5404                 let mut pk = [2; 33]; pk[1] = 0xff;
5405                 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5406                 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5407                         let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5408                         log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
5409                         remote_last_secret
5410                 } else {
5411                         log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
5412                         [0;32]
5413                 };
5414                 self.mark_awaiting_response();
5415                 msgs::ChannelReestablish {
5416                         channel_id: self.context.channel_id(),
5417                         // The protocol has two different commitment number concepts - the "commitment
5418                         // transaction number", which starts from 0 and counts up, and the "revocation key
5419                         // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5420                         // commitment transaction numbers by the index which will be used to reveal the
5421                         // revocation key for that commitment transaction, which means we have to convert them
5422                         // to protocol-level commitment numbers here...
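                        // For example, with INITIAL_COMMITMENT_NUMBER = 2^48 - 1, a holder commitment
                        // number of INITIAL_COMMITMENT_NUMBER - 3 means we have advanced three times and
                        // thus report a protocol-level next_local_commitment_number of 3.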
5423
5424                         // next_local_commitment_number is the next commitment_signed number we expect to
5425                         // receive (indicating if they need to resend one that we missed).
5426                         next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5427                         // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5428                         // receive, however we track it by the next commitment number for a remote transaction
5429                         // (which is one further, as they always revoke previous commitment transaction, not
5430                         // the one we send) so we have to decrement by 1. Note that if
5431                         // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5432                         // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5433                         // overflow here.
5434                         next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5435                         your_last_per_commitment_secret: remote_last_secret,
5436                         my_current_per_commitment_point: dummy_pubkey,
5437                         // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5438                         // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5439                         // txid of that interactive transaction, else we MUST NOT set it.
5440                         next_funding_txid: None,
5441                 }
5442         }
5443
5444
5445         // Send stuff to our remote peers:
5446
5447         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5448         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5449         /// commitment update.
5450         ///
5451         /// `Err`s will only be [`ChannelError::Ignore`].
5452         pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5453                 onion_routing_packet: msgs::OnionPacket, logger: &L)
5454         -> Result<(), ChannelError> where L::Target: Logger {
5455                 self
5456                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
5457                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5458                         .map_err(|err| {
5459                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5460                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5461                                 err
5462                         })
5463         }
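        // Illustrative flow (hypothetical caller, error handling elided): queue the HTLC, then
        // drive the holding cell once any pending RAA/monitor update resolves:
        //   chan.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, source, onion_packet, &logger)?;
        //   // ...later, e.g. on revoke_and_ack or monitor-update completion:
        //   chan.maybe_free_holding_cell_htlcs(/* fee estimator, logger */); // builds the commitment update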
5464
5465         /// Adds a pending outbound HTLC to this channel; note that you probably want
5466         /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5467         ///
5468         /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5469         /// the wire:
5470         /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5471         ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5472         ///   awaiting ACK.
5473         /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5474         ///   we may not yet have sent the previous commitment update messages and will need to
5475         ///   regenerate them.
5476         ///
5477         /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5478         /// on this [`Channel`] if `force_holding_cell` is false.
5479         ///
5480         /// `Err`s will only be [`ChannelError::Ignore`].
5481         fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5482                 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
5483         -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
5484                 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5485                         return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5486                 }
5487                 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5488                 if amount_msat > channel_total_msat {
5489                         return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5490                 }
5491
5492                 if amount_msat == 0 {
5493                         return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5494                 }
5495
5496                 let available_balances = self.context.get_available_balances();
5497                 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5498                         return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5499                                 available_balances.next_outbound_htlc_minimum_msat)));
5500                 }
5501
5502                 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5503                         return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5504                                 available_balances.next_outbound_htlc_limit_msat)));
5505                 }
5506
5507                 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5508                         // Note that this should never really happen: if we're !is_live(), receipt of an
5509                         // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5510                         // the user to send directly into a !is_live() channel. However, if we
5511                         // disconnected while the previous hop was doing the commitment dance we may
5512                         // end up getting here after the forwarding delay. In any case, returning an
5513                         // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5514                         return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5515                 }
5516
5517                 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5518                 log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
5519                         if force_holding_cell { "into holding cell" }
5520                         else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5521                         else { "to peer" });
5522
5523                 if need_holding_cell {
5524                         force_holding_cell = true;
5525                 }
5526
5527                 // Now update local state:
5528                 if force_holding_cell {
5529                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5530                                 amount_msat,
5531                                 payment_hash,
5532                                 cltv_expiry,
5533                                 source,
5534                                 onion_routing_packet,
5535                         });
5536                         return Ok(None);
5537                 }
5538
5539                 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5540                         htlc_id: self.context.next_holder_htlc_id,
5541                         amount_msat,
5542                         payment_hash: payment_hash.clone(),
5543                         cltv_expiry,
5544                         state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5545                         source,
5546                 });
5547
5548                 let res = msgs::UpdateAddHTLC {
5549                         channel_id: self.context.channel_id,
5550                         htlc_id: self.context.next_holder_htlc_id,
5551                         amount_msat,
5552                         payment_hash,
5553                         cltv_expiry,
5554                         onion_routing_packet,
5555                 };
5556                 self.context.next_holder_htlc_id += 1;
5557
5558                 Ok(Some(res))
5559         }
5560
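             /// Promotes HTLC and fee-update state that was waiting on this commitment, builds the new
             /// counterparty commitment transaction, and returns the [`ChannelMonitorUpdate`] carrying
             /// its txid, HTLCs and per-commitment point.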
5561         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5562                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5563                 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
5564                 // fail to generate this commitment, we are still at least at a point where upgrading
5565                 // their status is acceptable.
5566                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5567                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5568                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5569                         } else { None };
5570                         if let Some(state) = new_state {
5571                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
5572                                 htlc.state = state;
5573                         }
5574                 }
5575                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5576                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5577                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
5578                                 // Grab the preimage, if it exists, instead of cloning
5579                                 let mut reason = OutboundHTLCOutcome::Success(None);
5580                                 mem::swap(outcome, &mut reason);
5581                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5582                         }
5583                 }
5584                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5585                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5586                                 debug_assert!(!self.context.is_outbound());
5587                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5588                                 self.context.feerate_per_kw = feerate;
5589                                 self.context.pending_update_fee = None;
5590                         }
5591                 }
5592                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5593
5594                 let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
5595                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5596                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5597
5598                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5599                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5600                 }
5601
5602                 self.context.latest_monitor_update_id += 1;
5603                 let monitor_update = ChannelMonitorUpdate {
5604                         update_id: self.context.latest_monitor_update_id,
5605                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5606                                 commitment_txid: counterparty_commitment_txid,
5607                                 htlc_outputs: htlcs.clone(),
5608                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5609                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
5610                         }]
5611                 };
5612                 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5613                 monitor_update
5614         }
5615
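             /// Builds the next counterparty commitment transaction without mutating channel state,
             /// returning its txid and included HTLCs. In test/fuzzing builds this also cross-checks
             /// any previously-projected commitment fee against the fee actually computed.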
5616         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
5617                 let counterparty_keys = self.context.build_remote_transaction_keys();
5618                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5619                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5620
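                     // Test/fuzzing builds only: if we earlier projected this commitment's fee (when
                     // deciding whether an HTLC was affordable) and nothing relevant has changed since,
                     // assert the projection matches the fee of the transaction we just built.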
5621                 #[cfg(any(test, fuzzing))]
5622                 {
5623                         if !self.context.is_outbound() {
5624                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5625                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5626                                 if let Some(info) = projected_commit_tx_info {
5627                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5628                                         if info.total_pending_htlcs == total_pending_htlcs
5629                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5630                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5631                                                 && info.feerate == self.context.feerate_per_kw {
5632                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
5633                                                         assert_eq!(actual_fee, info.fee);
5634                                                 }
5635                                 }
5636                         }
5637                 }
5638
5639                 (counterparty_commitment_txid, commitment_stats.htlcs_included)
5640         }
5641
5642         /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5643         /// generation when we shouldn't change HTLC/channel state.
5644         fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5645                 // Run the (test/fuzzing-only) fee consistency checks in `build_commitment_no_state_update`
5646                 #[cfg(any(test, fuzzing))]
5647                 self.build_commitment_no_state_update(logger);
5648
5649                 let counterparty_keys = self.context.build_remote_transaction_keys();
5650                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5651                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5652                 let (signature, htlc_signatures);
5653
5654                 {
5655                         let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5656                         for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5657                                 htlcs.push(htlc);
5658                         }
5659
5660                         let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5661                                 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
5662                         signature = res.0;
5663                         htlc_signatures = res.1;
5664
5665                         log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5666                                 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5667                                 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5668                                 log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
5669
5670                         for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5671                                 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5672                                         encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5673                                         encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
5674                                         log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
5675                                         log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
5676                         }
5677                 }
5678
5679                 Ok((msgs::CommitmentSigned {
5680                         channel_id: self.context.channel_id,
5681                         signature,
5682                         htlc_signatures,
5683                         #[cfg(taproot)]
5684                         partial_signature_with_nonce: None,
5685                 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5686         }
5687
5688         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5689         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5690         ///
5691         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update; see docs on
5692         /// [`Self::send_htlc`] and [`Self::build_commitment_no_status_check`] for more info.
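             ///
             /// A minimal usage sketch (the `channel`, `payment_hash`, `cltv_expiry`, `source`,
             /// `onion_packet` and `logger` bindings here are hypothetical, not LDK calling code):
             ///
             /// ```ignore
             /// match channel.send_htlc_and_commit(10_000, payment_hash, cltv_expiry, source, onion_packet, &logger) {
             ///     // A new remote commitment was built; persist the monitor update before releasing
             ///     // the resulting update_add_htlc/commitment_signed messages to the peer.
             ///     Ok(Some(_monitor_update)) => {},
             ///     // The HTLC went into the holding cell; nothing to send until it is freed.
             ///     Ok(None) => {},
             ///     // `ChannelError::Ignore` errors mean the HTLC should simply be failed backwards.
             ///     Err(_e) => {},
             /// }
             /// ```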
5693         pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
5694                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
5695                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5696                 match send_res? {
5697                         Some(_) => {
5698                                 let monitor_update = self.build_commitment_no_status_check(logger);
5699                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5700                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
5701                         },
5702                         None => Ok(None)
5703                 }
5704         }
5705
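             /// Stores the counterparty's forwarding parameters (fees and CLTV expiry delta) from its
             /// `channel_update` message for use when forwarding HTLCs, rejecting updates whose
             /// `htlc_minimum_msat` could never be satisfied by this channel.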
5706         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
5707                 if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
5708                         return Err(ChannelError::Close("Minimum htlc value is greater than or equal to channel value".to_string()));
5709                 }
5710                 self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
5711                         fee_base_msat: msg.contents.fee_base_msat,
5712                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5713                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
5714                 });
5715
5716                 Ok(())
5717         }
5718
5719         /// Begins the shutdown process, getting a message for the remote peer and returning all
5720         /// holding cell HTLCs for payment failure.
5721         ///
5722         /// May jump to the channel being fully shut down (see [`Self::is_shutdown`]), in which case
5723         /// no [`ChannelMonitorUpdate`] will be returned.
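             ///
             /// A rough usage sketch (hypothetical bindings, not LDK calling code):
             ///
             /// ```ignore
             /// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
             ///     channel.get_shutdown(&signer_provider, &their_features, None, None)?;
             /// // Send `shutdown_msg` to the peer, persist the monitor update if one was returned,
             /// // and fail each HTLC in `dropped_htlcs` backwards.
             /// ```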
5724         pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5725                 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5726         -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
5727         where SP::Target: SignerProvider {
5728                 for htlc in self.context.pending_outbound_htlcs.iter() {
5729                         if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5730                                 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5731                         }
5732                 }
5733                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5734                         if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5735                                 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5736                         }
5737                         else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5738                                 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5739                         }
5740                 }
5741                 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5742                         return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5743                 }
5744                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5745                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5746                         return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5747                 }
5748
5749                 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5750                 // script is set; we just force-close and call it a day.
5751                 let chan_closed = self.context.channel_state < ChannelState::FundingSent as u32;
5755
5756                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5757                         Some(_) => false,
5758                         None if !chan_closed => {
5759                                 // use override shutdown script if provided
5760                                 let shutdown_scriptpubkey = match override_shutdown_script {
5761                                         Some(script) => script,
5762                                         None => {
5763                                                 // otherwise, use the shutdown scriptpubkey provided by the signer
5764                                                 match signer_provider.get_shutdown_scriptpubkey() {
5765                                                         Ok(scriptpubkey) => scriptpubkey,
5766                                                         Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5767                                                 }
5768                                         },
5769                                 };
5770                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
5771                                         return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5772                                 }
5773                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5774                                 true
5775                         },
5776                         None => false,
5777                 };
5778
5779                 // From here on out, we may not fail!
5780                 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
5781                 if self.context.channel_state < ChannelState::FundingSent as u32 {
5782                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
5783                 } else {
5784                         self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5785                 }
5786                 self.context.update_time_counter += 1;
5787
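                     // If we just generated a shutdown script above, hand it to the `ChannelMonitor` via a
                     // monitor update so the script is persisted and known at closing time.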
5788                 let monitor_update = if update_shutdown_script {
5789                         self.context.latest_monitor_update_id += 1;
5790                         let monitor_update = ChannelMonitorUpdate {
5791                                 update_id: self.context.latest_monitor_update_id,
5792                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5793                                         scriptpubkey: self.get_closing_scriptpubkey(),
5794                                 }],
5795                         };
5796                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5797                         if self.push_blockable_mon_update(monitor_update) {
5798                                 self.context.pending_monitor_updates.last().map(|upd| &upd.update)
5799                         } else { None }
5800                 } else { None };
5801                 let shutdown = msgs::Shutdown {
5802                         channel_id: self.context.channel_id,
5803                         scriptpubkey: self.get_closing_scriptpubkey(),
5804                 };
5805
5806                 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5807                 // our shutdown until we've committed all of the pending changes.
5808                 self.context.holding_cell_update_fee = None;
5809                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5810                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5811                         match htlc_update {
5812                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5813                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5814                                         false
5815                                 },
5816                                 _ => true
5817                         }
5818                 });
5819
5820                 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5821                         "we can't both complete shutdown and return a monitor update");
5822
5823                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5824         }
5825
5826         /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
5827         /// shutdown of this channel - no more calls into this Channel may be made afterwards except
5828         /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
5829         /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
5830         /// immediately (others we will have to allow to time out).
5831         pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
5832                 // Note that we MUST only generate a monitor update that indicates force-closure - we're
5833                 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
5834                 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
5835                 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
5836                 assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
5837
5838                 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
5839                 // return them to fail the payment.
5840                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5841                 let counterparty_node_id = self.context.get_counterparty_node_id();
5842                 for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
5843                         match htlc_update {
5844                                 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
5845                                         dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
5846                                 },
5847                                 _ => {}
5848                         }
5849                 }
5850                 let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
5851                 // If we haven't yet exchanged funding signatures (i.e. channel_state < FundingSent),
5852                         // returning a channel monitor update here would imply a channel monitor update before
5853                         // we even registered the channel monitor to begin with, which is invalid.
5854                         // Thus, if we aren't actually at a point where we could conceivably broadcast the
5855                         // funding transaction, don't return a funding txo (which prevents providing the
5856                         // monitor update to the user, even if we return one).
5857                         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
5858                         if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
5859                                 self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
5860                                 Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
5861                                         update_id: self.context.latest_monitor_update_id,
5862                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
5863                                 }))
5864                         } else { None }
5865                 } else { None };
5866
5867                 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5868                 self.context.update_time_counter += 1;
5869                 (monitor_update, dropped_outbound_htlcs)
5870         }
5871
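             /// Returns the `HTLCSource` and payment hash of every outbound HTLC we are currently
             /// responsible for: both those still queued in the holding cell and those already
             /// included in pending commitment state.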
5872         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5873                 self.context.holding_cell_htlc_updates.iter()
5874                         .flat_map(|htlc_update| {
5875                                 match htlc_update {
5876                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5877                                                 => Some((source, payment_hash)),
5878                                         _ => None,
5879                                 }
5880                         })
5881                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5882         }
5883 }
5884
5885 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5886 pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
5887         pub context: ChannelContext<Signer>,
5888 }
5889
5890 impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
5891         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
5892                 // The default channel type (ie the first one we try) depends on whether the channel is
5893                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
5894                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
5895                 // with no other changes, and fall back to `only_static_remotekey`.
5896                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
5897                 if !config.channel_handshake_config.announced_channel &&
5898                         config.channel_handshake_config.negotiate_scid_privacy &&
5899                         their_features.supports_scid_privacy() {
5900                         ret.set_scid_privacy_required();
5901                 }
5902
5903                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
5904                 // set it now. If they don't understand it, we'll fall back to our default of
5905                 // `only_static_remotekey`.
5906                 #[cfg(anchors)]
5907                 { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
5908                         if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
5909                                 their_features.supports_anchors_zero_fee_htlc_tx() {
5910                                 ret.set_anchors_zero_fee_htlc_tx_required();
5911                         }
5912                 }
5913
5914                 ret
5915         }
5916
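             /// Creates a new outbound (initiator) channel, validating the requested value, push amount
             /// and config against BOLT 2 and implementation limits before building any channel state.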
5917         pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
5918                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5919                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5920                 outbound_scid_alias: u64
5921         ) -> Result<Channel<Signer>, APIError>
5922         where ES::Target: EntropySource,
5923               SP::Target: SignerProvider<Signer = Signer>,
5924               F::Target: FeeEstimator,
5925         {
5926                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5927                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5928                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5929                 let pubkeys = holder_signer.pubkeys().clone();
5930
5931                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5932                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5933                 }
5934                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5935                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5936                 }
5937                 let channel_value_msat = channel_value_satoshis * 1000;
5938                 if push_msat > channel_value_msat {
5939                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5940                 }
5941                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5942                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
5943                 }
5944                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5945                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5946                         // Protocol-level safety check; this should never happen because of
5947                         // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
5948                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5949                 }
5950
5951                 let channel_type = Self::get_initial_channel_type(&config, their_features);
5952                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5953
5954                 let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
5955
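                     // A rough sketch of the affordability math (see `commit_tx_fee_msat` and `chan_utils`
                     // for the real constants): the fee scales roughly as
                     // feerate_per_kw * (commitment_base_weight + num_htlcs * per_htlc_weight) / 1000 sats,
                     // so the funder's balance after push_msat must cover at least that fee with
                     // MIN_AFFORDABLE_HTLC_COUNT HTLC slots to spare.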
5956                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5957                 let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
5958                 if value_to_self_msat < commitment_tx_fee {
5959                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5960                 }
5961
5962                 let mut secp_ctx = Secp256k1::new();
5963                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5964
5965                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5966                         match signer_provider.get_shutdown_scriptpubkey() {
5967                                 Ok(scriptpubkey) => Some(scriptpubkey),
5968                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5969                         }
5970                 } else { None };
5971
5972                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5973                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
5974                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5975                         }
5976                 }
5977
5978                 let destination_script = match signer_provider.get_destination_script() {
5979                         Ok(script) => script,
5980                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5981                 };
5982
5983                 let temporary_channel_id = entropy_source.get_secure_random_bytes();
5984
5985                 Ok(Channel {
5986                         context: ChannelContext {
5987                                 user_id,
5988
5989                                 config: LegacyChannelConfig {
5990                                         options: config.channel_config.clone(),
5991                                         announced_channel: config.channel_handshake_config.announced_channel,
5992                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5993                                 },
5994
5995                                 prev_config: None,
5996
5997                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5998
5999                                 channel_id: temporary_channel_id,
6000                                 temporary_channel_id: Some(temporary_channel_id),
6001                                 channel_state: ChannelState::OurInitSent as u32,
6002                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6003                                 secp_ctx,
6004                                 channel_value_satoshis,
6005
6006                                 latest_monitor_update_id: 0,
6007
6008                                 holder_signer,
6009                                 shutdown_scriptpubkey,
6010                                 destination_script,
6011
6012                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6013                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6014                                 value_to_self_msat,
6015
6016                                 pending_inbound_htlcs: Vec::new(),
6017                                 pending_outbound_htlcs: Vec::new(),
6018                                 holding_cell_htlc_updates: Vec::new(),
6019                                 pending_update_fee: None,
6020                                 holding_cell_update_fee: None,
6021                                 next_holder_htlc_id: 0,
6022                                 next_counterparty_htlc_id: 0,
6023                                 update_time_counter: 1,
6024
6025                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6026
6027                                 monitor_pending_channel_ready: false,
6028                                 monitor_pending_revoke_and_ack: false,
6029                                 monitor_pending_commitment_signed: false,
6030                                 monitor_pending_forwards: Vec::new(),
6031                                 monitor_pending_failures: Vec::new(),
6032                                 monitor_pending_finalized_fulfills: Vec::new(),
6033
6034                                 #[cfg(debug_assertions)]
6035                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6036                                 #[cfg(debug_assertions)]
6037                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6038
6039                                 last_sent_closing_fee: None,
6040                                 pending_counterparty_closing_signed: None,
6041                                 closing_fee_limits: None,
6042                                 target_closing_feerate_sats_per_kw: None,
6043
6044                                 inbound_awaiting_accept: false,
6045
6046                                 funding_tx_confirmed_in: None,
6047                                 funding_tx_confirmation_height: 0,
6048                                 short_channel_id: None,
6049                                 channel_creation_height: current_chain_height,
6050
6051                                 feerate_per_kw: feerate,
6052                                 counterparty_dust_limit_satoshis: 0,
6053                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6054                                 counterparty_max_htlc_value_in_flight_msat: 0,
6055                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6056                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in when we receive accept_channel
6057                                 holder_selected_channel_reserve_satoshis,
6058                                 counterparty_htlc_minimum_msat: 0,
6059                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6060                                 counterparty_max_accepted_htlcs: 0,
6061                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6062                                 minimum_depth: None, // Filled in when we receive accept_channel
6063
6064                                 counterparty_forwarding_info: None,
6065
6066                                 channel_transaction_parameters: ChannelTransactionParameters {
6067                                         holder_pubkeys: pubkeys,
6068                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6069                                         is_outbound_from_holder: true,
6070                                         counterparty_parameters: None,
6071                                         funding_outpoint: None,
6072                                         opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
6073                                         opt_non_zero_fee_anchors: None
6074                                 },
6075                                 funding_transaction: None,
6076
6077                                 counterparty_cur_commitment_point: None,
6078                                 counterparty_prev_commitment_point: None,
6079                                 counterparty_node_id,
6080
6081                                 counterparty_shutdown_scriptpubkey: None,
6082
6083                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6084
6085                                 channel_update_status: ChannelUpdateStatus::Enabled,
6086                                 closing_signed_in_flight: false,
6087
6088                                 announcement_sigs: None,
6089
6090                                 #[cfg(any(test, fuzzing))]
6091                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6092                                 #[cfg(any(test, fuzzing))]
6093                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6094
6095                                 workaround_lnd_bug_4006: None,
6096                                 sent_message_awaiting_response: None,
6097
6098                                 latest_inbound_scid_alias: None,
6099                                 outbound_scid_alias,
6100
6101                                 channel_pending_event_emitted: false,
6102                                 channel_ready_event_emitted: false,
6103
6104                                 #[cfg(any(test, fuzzing))]
6105                                 historical_inbound_htlc_fulfills: HashSet::new(),
6106
6107                                 channel_type,
6108                                 channel_keys_id,
6109
6110                                 pending_monitor_updates: Vec::new(),
6111                         }
6112                 })
6113         }
6114 }
6115
6116 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6117 pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
6118         pub context: ChannelContext<Signer>,
6119 }
6120
6121 impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
6122         /// Creates a new channel from a remote side's request for one.
6123         /// Assumes chain_hash has already been checked and corresponds with what we expect!
6124         pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
6125                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6126                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6127                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6128                 current_chain_height: u32, logger: &L, outbound_scid_alias: u64
6129         ) -> Result<Channel<Signer>, ChannelError>
6130                 where ES::Target: EntropySource,
6131                           SP::Target: SignerProvider<Signer = Signer>,
6132                           F::Target: FeeEstimator,
6133                           L::Target: Logger,
6134         {
6135                 let announced_channel = (msg.channel_flags & 1) == 1;
6136
6137                 // First check the channel type is known, failing before we do anything else if we don't
6138                 // support this channel type.
6139                 let channel_type = if let Some(channel_type) = &msg.channel_type {
6140                         if channel_type.supports_any_optional_bits() {
6141                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6142                         }
6143
6144                         // We only support the channel types defined by the `ChannelManager` in
6145                         // `provided_channel_type_features`. The channel type must always support
6146                         // `static_remote_key`.
6147                         if !channel_type.requires_static_remote_key() {
6148                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6149                         }
6150                         // Make sure we support all of the features behind the channel type.
6151                         if !channel_type.is_subset(our_supported_features) {
6152                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6153                         }
6154                         if channel_type.requires_scid_privacy() && announced_channel {
6155                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6156                         }
6157                         channel_type.clone()
6158                 } else {
6159                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6160                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6161                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6162                         }
6163                         channel_type
6164                 };
6165                 let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
6166
6167                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6168                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6169                 let pubkeys = holder_signer.pubkeys().clone();
6170                 let counterparty_pubkeys = ChannelPublicKeys {
6171                         funding_pubkey: msg.funding_pubkey,
6172                         revocation_basepoint: msg.revocation_basepoint,
6173                         payment_point: msg.payment_point,
6174                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
6175                         htlc_basepoint: msg.htlc_basepoint
6176                 };
6177
6178                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6179                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be at least {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6180                 }
6181
6182                 // Check sanity of message fields:
6183                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6184                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6185                 }
6186                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6187                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6188                 }
6189                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6190                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6191                 }
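                     // Note: despite its name, this is the channel value net of the counterparty's
                     // selected reserve.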
6192                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6193                 if msg.push_msat > full_channel_value_msat {
6194                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6195                 }
6196                 if msg.dust_limit_satoshis > msg.funding_satoshis {
6197                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6198                 }
6199                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6200                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6201                 }
6202                 Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
6203
6204                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6205                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6206                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6207                 }
6208                 if msg.max_accepted_htlcs < 1 {
6209                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6210                 }
6211                 if msg.max_accepted_htlcs > MAX_HTLCS {
6212                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6213                 }
6214
6215                 // Now check against optional parameters as set by config...
6216                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6217                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6218                 }
6219                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6220                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6221                 }
6222                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6223                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6224                 }
6225                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6226                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6227                 }
6228                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6229                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6230                 }
6231                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6232                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6233                 }
6234                 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6235                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6236                 }
6237
6238                 // Convert things into internal flags and prep our state:
6239
6240                 if config.channel_handshake_limits.force_announced_channel_preference {
6241                         if config.channel_handshake_config.announced_channel != announced_channel {
6242                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6243                         }
6244                 }
6245
6246                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6247                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6248                         // Protocol-level safety check; this should never happen because of
6249                         // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6250                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. holder_selected_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6251                 }
6252                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6253                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. holder_selected_channel_reserve was ({}) msat. Channel value is ({} - {}) msat.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6254                 }
6255                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6256                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6257                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6258                 }
6259                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6260                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6261                 }
6262
6263                 // check if the funder's amount for the initial commitment tx is sufficient
6264                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6265                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6266                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
6267                 if funders_amount_msat / 1000 < commitment_tx_fee {
6268                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
6269                 }
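                     // Worked example with illustrative numbers: a 20_000 sat funding with push_msat =
                     // 1_000_000 leaves funders_amount_msat = 19_000_000, i.e. 19_000 sats, which must
                     // cover `commitment_tx_fee` (already converted to whole sats above).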
6270
6271                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
6272                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6273                 // want to push much to us), our counterparty should always have more than our reserve.
6274                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6275                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6276                 }
6277
6278                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6279                         match &msg.shutdown_scriptpubkey {
6280                                 &Some(ref script) => {
6281                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6282                                         if script.is_empty() {
6283                                                 None
6284                                         } else {
6285                                                 if !script::is_bolt2_compliant(&script, their_features) {
6286                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6287                                                 }
6288                                                 Some(script.clone())
6289                                         }
6290                                 },
6291                                 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy; we fail the channel.
6292                                 &None => {
6293                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we did not receive any script. Use a 0-length script to opt out".to_owned()));
6294                                 }
6295                         }
6296                 } else { None };
6297
6298                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6299                         match signer_provider.get_shutdown_scriptpubkey() {
6300                                 Ok(scriptpubkey) => Some(scriptpubkey),
6301                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6302                         }
6303                 } else { None };
6304
6305                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6306                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6307                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6308                         }
6309                 }
6310
6311                 let destination_script = match signer_provider.get_destination_script() {
6312                         Ok(script) => script,
6313                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6314                 };
6315
6316                 let mut secp_ctx = Secp256k1::new();
6317                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6318
6319                 let chan = Channel {
6320                         context: ChannelContext {
6321                                 user_id,
6322
6323                                 config: LegacyChannelConfig {
6324                                         options: config.channel_config.clone(),
6325                                         announced_channel,
6326                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6327                                 },
6328
6329                                 prev_config: None,
6330
6331                                 inbound_handshake_limits_override: None,
6332
6333                                 temporary_channel_id: Some(msg.temporary_channel_id),
6334                                 channel_id: msg.temporary_channel_id,
6335                                 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6336                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6337                                 secp_ctx,
6338
6339                                 latest_monitor_update_id: 0,
6340
6341                                 holder_signer,
6342                                 shutdown_scriptpubkey,
6343                                 destination_script,
6344
6345                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6346                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6347                                 value_to_self_msat: msg.push_msat,
6348
6349                                 pending_inbound_htlcs: Vec::new(),
6350                                 pending_outbound_htlcs: Vec::new(),
6351                                 holding_cell_htlc_updates: Vec::new(),
6352                                 pending_update_fee: None,
6353                                 holding_cell_update_fee: None,
6354                                 next_holder_htlc_id: 0,
6355                                 next_counterparty_htlc_id: 0,
6356                                 update_time_counter: 1,
6357
6358                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6359
6360                                 monitor_pending_channel_ready: false,
6361                                 monitor_pending_revoke_and_ack: false,
6362                                 monitor_pending_commitment_signed: false,
6363                                 monitor_pending_forwards: Vec::new(),
6364                                 monitor_pending_failures: Vec::new(),
6365                                 monitor_pending_finalized_fulfills: Vec::new(),
6366
6367                                 #[cfg(debug_assertions)]
6368                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6369                                 #[cfg(debug_assertions)]
6370                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6371
6372                                 last_sent_closing_fee: None,
6373                                 pending_counterparty_closing_signed: None,
6374                                 closing_fee_limits: None,
6375                                 target_closing_feerate_sats_per_kw: None,
6376
6377                                 inbound_awaiting_accept: true,
6378
6379                                 funding_tx_confirmed_in: None,
6380                                 funding_tx_confirmation_height: 0,
6381                                 short_channel_id: None,
6382                                 channel_creation_height: current_chain_height,
6383
6384                                 feerate_per_kw: msg.feerate_per_kw,
6385                                 channel_value_satoshis: msg.funding_satoshis,
6386                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6387                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6388                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6389                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6390                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6391                                 holder_selected_channel_reserve_satoshis,
6392                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6393                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6394                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6395                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6396                                 minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
6397
6398                                 counterparty_forwarding_info: None,
6399
6400                                 channel_transaction_parameters: ChannelTransactionParameters {
6401                                         holder_pubkeys: pubkeys,
6402                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6403                                         is_outbound_from_holder: false,
6404                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6405                                                 selected_contest_delay: msg.to_self_delay,
6406                                                 pubkeys: counterparty_pubkeys,
6407                                         }),
6408                                         funding_outpoint: None,
6409                                         opt_anchors: if opt_anchors { Some(()) } else { None },
6410                                         opt_non_zero_fee_anchors: None
6411                                 },
6412                                 funding_transaction: None,
6413
6414                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6415                                 counterparty_prev_commitment_point: None,
6416                                 counterparty_node_id,
6417
6418                                 counterparty_shutdown_scriptpubkey,
6419
6420                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6421
6422                                 channel_update_status: ChannelUpdateStatus::Enabled,
6423                                 closing_signed_in_flight: false,
6424
6425                                 announcement_sigs: None,
6426
6427                                 #[cfg(any(test, fuzzing))]
6428                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6429                                 #[cfg(any(test, fuzzing))]
6430                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6431
6432                                 workaround_lnd_bug_4006: None,
6433                                 sent_message_awaiting_response: None,
6434
6435                                 latest_inbound_scid_alias: None,
6436                                 outbound_scid_alias,
6437
6438                                 channel_pending_event_emitted: false,
6439                                 channel_ready_event_emitted: false,
6440
6441                                 #[cfg(any(test, fuzzing))]
6442                                 historical_inbound_htlc_fulfills: HashSet::new(),
6443
6444                                 channel_type,
6445                                 channel_keys_id,
6446
6447                                 pending_monitor_updates: Vec::new(),
6448                         }
6449                 };
6450
6451                 Ok(chan)
6452         }
6453 }
6454
6455 const SERIALIZATION_VERSION: u8 = 3;
6456 const MIN_SERIALIZATION_VERSION: u8 = 2;
6457
6458 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6459         (0, FailRelay),
6460         (1, FailMalformed),
6461         (2, Fulfill),
6462 );
6463
6464 impl Writeable for ChannelUpdateStatus {
6465         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6466                 // We only care about writing out the current state as it was announced, i.e. only either
6467                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6468                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6469                 match self {
6470                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6471                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6472                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6473                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6474                 }
6475                 Ok(())
6476         }
6477 }
6478
6479 impl Readable for ChannelUpdateStatus {
6480         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6481                 Ok(match <u8 as Readable>::read(reader)? {
6482                         0 => ChannelUpdateStatus::Enabled,
6483                         1 => ChannelUpdateStatus::Disabled,
6484                         _ => return Err(DecodeError::InvalidValue),
6485                 })
6486         }
6487 }
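// Round-trip note: the staged variants intentionally collapse across serialization
// (DisabledStaged reads back as Enabled, EnabledStaged as Disabled), restoring the most
// recently announced state rather than the in-memory one.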
6488
6489 impl Writeable for AnnouncementSigsState {
6490         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6491                 // We only care about writing out the current state as if we had just disconnected, at
6492                 // which point we always set anything but PeerReceived to NotSent.
6493                 match self {
6494                         AnnouncementSigsState::NotSent => 0u8.write(writer),
6495                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
6496                         AnnouncementSigsState::Committed => 0u8.write(writer),
6497                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6498                 }
6499         }
6500 }
6501
6502 impl Readable for AnnouncementSigsState {
6503         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6504                 Ok(match <u8 as Readable>::read(reader)? {
6505                         0 => AnnouncementSigsState::NotSent,
6506                         1 => AnnouncementSigsState::PeerReceived,
6507                         _ => return Err(DecodeError::InvalidValue),
6508                 })
6509         }
6510 }
6511
6512 impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
6513         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6514                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6515                 // called.
6516
6517                 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6518
6519                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6520                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6521                 // the low bytes now and the optional high bytes later.
6522                 let user_id_low = self.context.user_id as u64;
6523                 user_id_low.write(writer)?;
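                     // Illustrative round-trip: for user_id = u128::MAX we write
                     // user_id_low == u64::MAX here, the high half goes out as the odd TLV
                     // `user_id_high_opt` below, and the reader rebuilds
                     // `user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64)`.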
6524
6525                 // Version 1 deserializers expected to read parts of the config object here. Version 2
6526                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6527                 // `minimum_depth` we simply write dummy values here.
6528                 writer.write_all(&[0; 8])?;
6529
6530                 self.context.channel_id.write(writer)?;
6531                 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6532                 self.context.channel_value_satoshis.write(writer)?;
6533
6534                 self.context.latest_monitor_update_id.write(writer)?;
6535
6536                 let mut key_data = VecWriter(Vec::new());
6537                 self.context.holder_signer.write(&mut key_data)?;
6538                 assert!(key_data.0.len() < core::usize::MAX);
6539                 assert!(key_data.0.len() < core::u32::MAX as usize);
6540                 (key_data.0.len() as u32).write(writer)?;
6541                 writer.write_all(&key_data.0[..])?;
6542
6543                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6544                 // this channel was deserialized from that format.
6545                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6546                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6547                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6548                 }
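                     // Note on the None arm: PUBLIC_KEY_SIZE zero bytes are not a valid
                     // secp256k1 point, so the legacy read path fails to parse them and ends
                     // up with no shutdown script; the TLV entry below is preferred either way.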
6549                 self.context.destination_script.write(writer)?;
6550
6551                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
6552                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
6553                 self.context.value_to_self_msat.write(writer)?;
6554
6555                 let mut dropped_inbound_htlcs = 0;
6556                 for htlc in self.context.pending_inbound_htlcs.iter() {
6557                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
6558                                 dropped_inbound_htlcs += 1;
6559                         }
6560                 }
6561                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
6562                 for htlc in self.context.pending_inbound_htlcs.iter() {
6563                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
6564                                 continue; // Drop
6565                         }
6566                         htlc.htlc_id.write(writer)?;
6567                         htlc.amount_msat.write(writer)?;
6568                         htlc.cltv_expiry.write(writer)?;
6569                         htlc.payment_hash.write(writer)?;
6570                         match &htlc.state {
6571                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
6572                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
6573                                         1u8.write(writer)?;
6574                                         htlc_state.write(writer)?;
6575                                 },
6576                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
6577                                         2u8.write(writer)?;
6578                                         htlc_state.write(writer)?;
6579                                 },
6580                                 &InboundHTLCState::Committed => {
6581                                         3u8.write(writer)?;
6582                                 },
6583                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
6584                                         4u8.write(writer)?;
6585                                         removal_reason.write(writer)?;
6586                                 },
6587                         }
6588                 }
6589
6590                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
6591
6592                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
6593                 for htlc in self.context.pending_outbound_htlcs.iter() {
6594                         htlc.htlc_id.write(writer)?;
6595                         htlc.amount_msat.write(writer)?;
6596                         htlc.cltv_expiry.write(writer)?;
6597                         htlc.payment_hash.write(writer)?;
6598                         htlc.source.write(writer)?;
6599                         match &htlc.state {
6600                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
6601                                         0u8.write(writer)?;
6602                                         onion_packet.write(writer)?;
6603                                 },
6604                                 &OutboundHTLCState::Committed => {
6605                                         1u8.write(writer)?;
6606                                 },
6607                                 &OutboundHTLCState::RemoteRemoved(_) => {
6608                                         // Treat this as Committed because we haven't received the CS - they'll
6609                                         // re-send the claim/fail on reconnect, as well as (hopefully) the missing CS.
6610                                         1u8.write(writer)?;
6611                                 },
6612                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
6613                                         3u8.write(writer)?;
6614                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
6615                                                 preimages.push(preimage);
6616                                         }
6617                                         let reason: Option<&HTLCFailReason> = outcome.into();
6618                                         reason.write(writer)?;
6619                                 }
6620                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
6621                                         4u8.write(writer)?;
6622                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
6623                                                 preimages.push(preimage);
6624                                         }
6625                                         let reason: Option<&HTLCFailReason> = outcome.into();
6626                                         reason.write(writer)?;
6627                                 }
6628                         }
6629                 }
6630
6631                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
6632                 for update in self.context.holding_cell_htlc_updates.iter() {
6633                         match update {
6634                                 &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
6635                                         0u8.write(writer)?;
6636                                         amount_msat.write(writer)?;
6637                                         cltv_expiry.write(writer)?;
6638                                         payment_hash.write(writer)?;
6639                                         source.write(writer)?;
6640                                         onion_routing_packet.write(writer)?;
6641                                 },
6642                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
6643                                         1u8.write(writer)?;
6644                                         payment_preimage.write(writer)?;
6645                                         htlc_id.write(writer)?;
6646                                 },
6647                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
6648                                         2u8.write(writer)?;
6649                                         htlc_id.write(writer)?;
6650                                         err_packet.write(writer)?;
6651                                 }
6652                         }
6653                 }
6654
6655                 match self.context.resend_order {
6656                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
6657                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
6658                 }
6659
6660                 self.context.monitor_pending_channel_ready.write(writer)?;
6661                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
6662                 self.context.monitor_pending_commitment_signed.write(writer)?;
6663
6664                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
6665                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
6666                         pending_forward.write(writer)?;
6667                         htlc_id.write(writer)?;
6668                 }
6669
6670                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
6671                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
6672                         htlc_source.write(writer)?;
6673                         payment_hash.write(writer)?;
6674                         fail_reason.write(writer)?;
6675                 }
6676
6677                 if self.context.is_outbound() {
6678                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
6679                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
6680                         Some(feerate).write(writer)?;
6681                 } else {
6682                         // As with inbound HTLCs, if the update was only announced and never committed in a
6683                         // commitment_signed, drop it.
6684                         None::<u32>.write(writer)?;
6685                 }
6686                 self.context.holding_cell_update_fee.write(writer)?;
6687
6688                 self.context.next_holder_htlc_id.write(writer)?;
6689                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
6690                 self.context.update_time_counter.write(writer)?;
6691                 self.context.feerate_per_kw.write(writer)?;
6692
6693                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
6694                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
6695                 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
6696                 // consider the stale state on reload.
6697                 0u8.write(writer)?;
6698
6699                 self.context.funding_tx_confirmed_in.write(writer)?;
6700                 self.context.funding_tx_confirmation_height.write(writer)?;
6701                 self.context.short_channel_id.write(writer)?;
6702
6703                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
6704                 self.context.holder_dust_limit_satoshis.write(writer)?;
6705                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
6706
6707                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
6708                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
6709
6710                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
6711                 self.context.holder_htlc_minimum_msat.write(writer)?;
6712                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
6713
6714                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
6715                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
6716
6717                 match &self.context.counterparty_forwarding_info {
6718                         Some(info) => {
6719                                 1u8.write(writer)?;
6720                                 info.fee_base_msat.write(writer)?;
6721                                 info.fee_proportional_millionths.write(writer)?;
6722                                 info.cltv_expiry_delta.write(writer)?;
6723                         },
6724                         None => 0u8.write(writer)?
6725                 }
6726
6727                 self.context.channel_transaction_parameters.write(writer)?;
6728                 self.context.funding_transaction.write(writer)?;
6729
6730                 self.context.counterparty_cur_commitment_point.write(writer)?;
6731                 self.context.counterparty_prev_commitment_point.write(writer)?;
6732                 self.context.counterparty_node_id.write(writer)?;
6733
6734                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
6735
6736                 self.context.commitment_secrets.write(writer)?;
6737
6738                 self.context.channel_update_status.write(writer)?;
6739
6740                 #[cfg(any(test, fuzzing))]
6741                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
6742                 #[cfg(any(test, fuzzing))]
6743                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
6744                         htlc.write(writer)?;
6745                 }
6746
6747                 // If the channel type is something other than only-static-remote-key, then we need to have
6748                 // older clients fail to deserialize this channel at all. If the type is
6749                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
6750                 // out at all.
6751                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
6752                         Some(&self.context.channel_type) } else { None };
6753
6754                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
6755                 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
6756                 // different percentage of the channel value than 10%, which older versions of LDK used
6757                 // to set it to before the percentage was made configurable.
6758                 let serialized_holder_selected_reserve =
6759                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
6760                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
6761
6762                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
6763                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
6764                 let serialized_holder_htlc_max_in_flight =
6765                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
6766                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
6767
6768                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
6769                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
6770
6771                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6772                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
6773                 // we write the high bytes as an option here.
6774                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
6775
6776                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
6777
6778                 write_tlv_fields!(writer, {
6779                         (0, self.context.announcement_sigs, option),
6780                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
6781                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
6782                         // them twice, once with their original default values above, and once as an option
6783                         // here. On the read side, old versions will simply ignore the odd-type entries here,
6784                         // and new versions map the default values to None and allow the TLV entries here to
6785                         // override that.
6786                         (1, self.context.minimum_depth, option),
6787                         (2, chan_type, option),
6788                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
6789                         (4, serialized_holder_selected_reserve, option),
6790                         (5, self.context.config, required),
6791                         (6, serialized_holder_htlc_max_in_flight, option),
6792                         (7, self.context.shutdown_scriptpubkey, option),
6793                         (9, self.context.target_closing_feerate_sats_per_kw, option),
6794                         (11, self.context.monitor_pending_finalized_fulfills, vec_type),
6795                         (13, self.context.channel_creation_height, required),
6796                         (15, preimages, vec_type),
6797                         (17, self.context.announcement_sigs_state, required),
6798                         (19, self.context.latest_inbound_scid_alias, option),
6799                         (21, self.context.outbound_scid_alias, required),
6800                         (23, channel_ready_event_emitted, option),
6801                         (25, user_id_high_opt, option),
6802                         (27, self.context.channel_keys_id, required),
6803                         (28, holder_max_accepted_htlcs, option),
6804                         (29, self.context.temporary_channel_id, option),
6805                         (31, channel_pending_event_emitted, option),
6806                         (33, self.context.pending_monitor_updates, vec_type),
6807                 });
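                     // TLV convention reminder ("it's OK to be odd"): readers skip unknown
                     // odd-typed entries, while an unknown even-typed entry (e.g. the channel
                     // type at 2) fails deserialization, which is how older clients are kept
                     // from loading channels they cannot handle.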
6808
6809                 Ok(())
6810         }
6811 }
6812
6813 const MAX_ALLOC_SIZE: usize = 64*1024;
6814 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<<SP::Target as SignerProvider>::Signer>
6815                 where
6816                         ES::Target: EntropySource,
6817                         SP::Target: SignerProvider
6818 {
6819         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
6820                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
6821                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
6822
6823                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6824                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
6825                 // the low bytes now and the high bytes later.
6826                 let user_id_low: u64 = Readable::read(reader)?;
6827
6828                 let mut config = Some(LegacyChannelConfig::default());
6829                 if ver == 1 {
6830                         // Read the old serialization of the ChannelConfig from version 0.0.98.
6831                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
6832                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
6833                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
6834                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
6835                 } else {
6836                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
6837                         let mut _val: u64 = Readable::read(reader)?;
6838                 }
6839
6840                 let channel_id = Readable::read(reader)?;
6841                 let channel_state = Readable::read(reader)?;
6842                 let channel_value_satoshis = Readable::read(reader)?;
6843
6844                 let latest_monitor_update_id = Readable::read(reader)?;
6845
6846                 let mut keys_data = None;
6847                 if ver <= 2 {
6848                         // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
6849                         // the `channel_keys_id` TLV is present below.
6850                         let keys_len: u32 = Readable::read(reader)?;
6851                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
6852                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
6853                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
6854                                 let mut data = [0; 1024];
6855                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
6856                                 reader.read_exact(read_slice)?;
6857                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
6858                         }
6859                 }
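                     // The capacity cap (MAX_ALLOC_SIZE) plus the 1KB chunked reads mean a
                     // corrupt `keys_len` can only cost us bytes actually present in the
                     // stream, never a multi-GB up-front allocation.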
6860
6861                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
6862                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
6863                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
6864                         Err(_) => None,
6865                 };
6866                 let destination_script = Readable::read(reader)?;
6867
6868                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
6869                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
6870                 let value_to_self_msat = Readable::read(reader)?;
6871
6872                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
6873
6874                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
6875                 for _ in 0..pending_inbound_htlc_count {
6876                         pending_inbound_htlcs.push(InboundHTLCOutput {
6877                                 htlc_id: Readable::read(reader)?,
6878                                 amount_msat: Readable::read(reader)?,
6879                                 cltv_expiry: Readable::read(reader)?,
6880                                 payment_hash: Readable::read(reader)?,
6881                                 state: match <u8 as Readable>::read(reader)? {
6882                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
6883                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
6884                                         3 => InboundHTLCState::Committed,
6885                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
6886                                         _ => return Err(DecodeError::InvalidValue),
6887                                 },
6888                         });
6889                 }
6890
6891                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
6892                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
6893                 for _ in 0..pending_outbound_htlc_count {
6894                         pending_outbound_htlcs.push(OutboundHTLCOutput {
6895                                 htlc_id: Readable::read(reader)?,
6896                                 amount_msat: Readable::read(reader)?,
6897                                 cltv_expiry: Readable::read(reader)?,
6898                                 payment_hash: Readable::read(reader)?,
6899                                 source: Readable::read(reader)?,
6900                                 state: match <u8 as Readable>::read(reader)? {
6901                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
6902                                         1 => OutboundHTLCState::Committed,
6903                                         2 => {
6904                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6905                                                 OutboundHTLCState::RemoteRemoved(option.into())
6906                                         },
6907                                         3 => {
6908                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6909                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
6910                                         },
6911                                         4 => {
6912                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6913                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
6914                                         },
6915                                         _ => return Err(DecodeError::InvalidValue),
6916                                 },
6917                         });
6918                 }
6919
6920                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
6921                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
6922                 for _ in 0..holding_cell_htlc_update_count {
6923                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
6924                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
6925                                         amount_msat: Readable::read(reader)?,
6926                                         cltv_expiry: Readable::read(reader)?,
6927                                         payment_hash: Readable::read(reader)?,
6928                                         source: Readable::read(reader)?,
6929                                         onion_routing_packet: Readable::read(reader)?,
6930                                 },
6931                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
6932                                         payment_preimage: Readable::read(reader)?,
6933                                         htlc_id: Readable::read(reader)?,
6934                                 },
6935                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
6936                                         htlc_id: Readable::read(reader)?,
6937                                         err_packet: Readable::read(reader)?,
6938                                 },
6939                                 _ => return Err(DecodeError::InvalidValue),
6940                         });
6941                 }
6942
6943                 let resend_order = match <u8 as Readable>::read(reader)? {
6944                         0 => RAACommitmentOrder::CommitmentFirst,
6945                         1 => RAACommitmentOrder::RevokeAndACKFirst,
6946                         _ => return Err(DecodeError::InvalidValue),
6947                 };
6948
6949                 let monitor_pending_channel_ready = Readable::read(reader)?;
6950                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
6951                 let monitor_pending_commitment_signed = Readable::read(reader)?;
6952
6953                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
6954                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
6955                 for _ in 0..monitor_pending_forwards_count {
6956                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
6957                 }
6958
6959                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
6960                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
6961                 for _ in 0..monitor_pending_failures_count {
6962                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
6963                 }
6964
6965                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
6966
6967                 let holding_cell_update_fee = Readable::read(reader)?;
6968
6969                 let next_holder_htlc_id = Readable::read(reader)?;
6970                 let next_counterparty_htlc_id = Readable::read(reader)?;
6971                 let update_time_counter = Readable::read(reader)?;
6972                 let feerate_per_kw = Readable::read(reader)?;
6973
6974                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
6975                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
6976                 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
6977                 // consider the stale state on reload.
6978                 match <u8 as Readable>::read(reader)? {
6979                         0 => {},
6980                         1 => {
6981                                 let _: u32 = Readable::read(reader)?;
6982                                 let _: u64 = Readable::read(reader)?;
6983                                 let _: Signature = Readable::read(reader)?;
6984                         },
6985                         _ => return Err(DecodeError::InvalidValue),
6986                 }
6987
6988                 let funding_tx_confirmed_in = Readable::read(reader)?;
6989                 let funding_tx_confirmation_height = Readable::read(reader)?;
6990                 let short_channel_id = Readable::read(reader)?;
6991
6992                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
6993                 let holder_dust_limit_satoshis = Readable::read(reader)?;
6994                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
6995                 let mut counterparty_selected_channel_reserve_satoshis = None;
6996                 if ver == 1 {
6997                         // Read the old serialization from version 0.0.98.
6998                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
6999                 } else {
7000                         // Read the 8 bytes of backwards-compatibility data.
7001                         let _dummy: u64 = Readable::read(reader)?;
7002                 }
7003                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7004                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7005                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7006
7007                 let mut minimum_depth = None;
7008                 if ver == 1 {
7009                         // Read the old serialization from version 0.0.98.
7010                         minimum_depth = Some(Readable::read(reader)?);
7011                 } else {
7012                         // Read the 4 bytes of backwards-compatibility data.
7013                         let _dummy: u32 = Readable::read(reader)?;
7014                 }
7015
7016                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7017                         0 => None,
7018                         1 => Some(CounterpartyForwardingInfo {
7019                                 fee_base_msat: Readable::read(reader)?,
7020                                 fee_proportional_millionths: Readable::read(reader)?,
7021                                 cltv_expiry_delta: Readable::read(reader)?,
7022                         }),
7023                         _ => return Err(DecodeError::InvalidValue),
7024                 };
7025
7026                 let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7027                 let funding_transaction = Readable::read(reader)?;
7028
7029                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7030
7031                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7032                 let counterparty_node_id = Readable::read(reader)?;
7033
7034                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7035                 let commitment_secrets = Readable::read(reader)?;
7036
7037                 let channel_update_status = Readable::read(reader)?;
7038
7039                 #[cfg(any(test, fuzzing))]
7040                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7041                 #[cfg(any(test, fuzzing))]
7042                 {
7043                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7044                         for _ in 0..htlc_fulfills_len {
7045                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7046                         }
7047                 }
7048
7049                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7050                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7051                                 FeeUpdateState::Outbound
7052                         } else {
7053                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7054                         }))
7055                 } else {
7056                         None
7057                 };
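                     // This mirrors the write side: outbound channels serialize any pending
                     // fee they sent (FeeUpdateState::Outbound here), while inbound channels
                     // only ever serialized the AwaitingRemoteRevokeToAnnounce case, so that
                     // is the only inbound state we need to rebuild.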
7058
7059                 let mut announcement_sigs = None;
7060                 let mut target_closing_feerate_sats_per_kw = None;
7061                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7062                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7063                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7064                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7065                 // only, so we default to that if none was written.
7066                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7067                 let mut channel_creation_height = Some(serialized_height);
7068                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7069
7070                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7071                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7072                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7073                 let mut latest_inbound_scid_alias = None;
7074                 let mut outbound_scid_alias = None;
7075                 let mut channel_pending_event_emitted = None;
7076                 let mut channel_ready_event_emitted = None;
7077
7078                 let mut user_id_high_opt: Option<u64> = None;
7079                 let mut channel_keys_id: Option<[u8; 32]> = None;
7080                 let mut temporary_channel_id: Option<[u8; 32]> = None;
7081                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7082
7083                 let mut pending_monitor_updates = Some(Vec::new());
7084
7085                 read_tlv_fields!(reader, {
7086                         (0, announcement_sigs, option),
7087                         (1, minimum_depth, option),
7088                         (2, channel_type, option),
7089                         (3, counterparty_selected_channel_reserve_satoshis, option),
7090                         (4, holder_selected_channel_reserve_satoshis, option),
7091                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7092                         (6, holder_max_htlc_value_in_flight_msat, option),
7093                         (7, shutdown_scriptpubkey, option),
7094                         (9, target_closing_feerate_sats_per_kw, option),
7095                         (11, monitor_pending_finalized_fulfills, vec_type),
7096                         (13, channel_creation_height, option),
7097                         (15, preimages_opt, vec_type),
7098                         (17, announcement_sigs_state, option),
7099                         (19, latest_inbound_scid_alias, option),
7100                         (21, outbound_scid_alias, option),
7101                         (23, channel_ready_event_emitted, option),
7102                         (25, user_id_high_opt, option),
7103                         (27, channel_keys_id, option),
7104                         (28, holder_max_accepted_htlcs, option),
7105                         (29, temporary_channel_id, option),
7106                         (31, channel_pending_event_emitted, option),
7107                         (33, pending_monitor_updates, vec_type),
7108                 });
7109
7110                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7111                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7112                         // If we've gotten to the funding stage of the channel, populate the signer with its
7113                         // required channel parameters.
7114                         let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7115                         if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
7116                                 holder_signer.provide_channel_parameters(&channel_parameters);
7117                         }
7118                         (channel_keys_id, holder_signer)
7119                 } else {
7120                         // `keys_data` can be `None` if we had corrupted data.
7121                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7122                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7123                         (holder_signer.channel_keys_id(), holder_signer)
7124                 };
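                     // Two recovery paths for the signer: channels written with the
                     // `channel_keys_id` TLV re-derive it deterministically via the
                     // SignerProvider, while pre-TLV channels (ver <= 2) fall back to the raw
                     // signer bytes read earlier.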
7125
7126                 if let Some(preimages) = preimages_opt {
7127                         let mut iter = preimages.into_iter();
7128                         for htlc in pending_outbound_htlcs.iter_mut() {
7129                                 match &htlc.state {
7130                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7131                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7132                                         }
7133                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7134                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7135                                         }
7136                                         _ => {}
7137                                 }
7138                         }
7139                         // We expect all preimages to be consumed above
7140                         if iter.next().is_some() {
7141                                 return Err(DecodeError::InvalidValue);
7142                         }
7143                 }
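                     // Preimages travel in their own TLV (15) and are re-attached purely by
                     // iteration order: the write side pushed one entry per Success outcome
                     // in outbound-HTLC order, so a missing or leftover entry here means the
                     // data is corrupt.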
7144
7145                 let chan_features = channel_type.as_ref().unwrap();
7146                 if !chan_features.is_subset(our_supported_features) {
7147                         // If the channel was written by a new version and negotiated with features we don't
7148                         // understand yet, refuse to read it.
7149                         return Err(DecodeError::UnknownRequiredFeature);
7150                 }
7151
7152                 let mut secp_ctx = Secp256k1::new();
7153                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7154
7155                 // `user_id` used to be a single u64 value. In order to remain backwards
7156                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7157                 // separate u64 values.
7158                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7159
7160                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7161
7162                 Ok(Channel {
7163                         context: ChannelContext {
7164                                 user_id,
7165
7166                                 config: config.unwrap(),
7167
7168                                 prev_config: None,
7169
7170                                 // Note that we don't care about serializing handshake limits as we only ever serialize
7171                                 // channel data after the handshake has completed.
7172                                 inbound_handshake_limits_override: None,
7173
7174                                 channel_id,
7175                                 temporary_channel_id,
7176                                 channel_state,
7177                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
7178                                 secp_ctx,
7179                                 channel_value_satoshis,
7180
7181                                 latest_monitor_update_id,
7182
7183                                 holder_signer,
7184                                 shutdown_scriptpubkey,
7185                                 destination_script,
7186
7187                                 cur_holder_commitment_transaction_number,
7188                                 cur_counterparty_commitment_transaction_number,
7189                                 value_to_self_msat,
7190
7191                                 holder_max_accepted_htlcs,
7192                                 pending_inbound_htlcs,
7193                                 pending_outbound_htlcs,
7194                                 holding_cell_htlc_updates,
7195
7196                                 resend_order,
7197
7198                                 monitor_pending_channel_ready,
7199                                 monitor_pending_revoke_and_ack,
7200                                 monitor_pending_commitment_signed,
7201                                 monitor_pending_forwards,
7202                                 monitor_pending_failures,
7203                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7204
7205                                 pending_update_fee,
7206                                 holding_cell_update_fee,
7207                                 next_holder_htlc_id,
7208                                 next_counterparty_htlc_id,
7209                                 update_time_counter,
7210                                 feerate_per_kw,
7211
7212                                 #[cfg(debug_assertions)]
7213                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7214                                 #[cfg(debug_assertions)]
7215                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7216
7217                                 last_sent_closing_fee: None,
7218                                 pending_counterparty_closing_signed: None,
7219                                 closing_fee_limits: None,
7220                                 target_closing_feerate_sats_per_kw,
7221
7222                                 inbound_awaiting_accept: false,
7223
7224                                 funding_tx_confirmed_in,
7225                                 funding_tx_confirmation_height,
7226                                 short_channel_id,
7227                                 channel_creation_height: channel_creation_height.unwrap(),
7228
7229                                 counterparty_dust_limit_satoshis,
7230                                 holder_dust_limit_satoshis,
7231                                 counterparty_max_htlc_value_in_flight_msat,
7232                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7233                                 counterparty_selected_channel_reserve_satoshis,
7234                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7235                                 counterparty_htlc_minimum_msat,
7236                                 holder_htlc_minimum_msat,
7237                                 counterparty_max_accepted_htlcs,
7238                                 minimum_depth,
7239
7240                                 counterparty_forwarding_info,
7241
7242                                 channel_transaction_parameters: channel_parameters,
7243                                 funding_transaction,
7244
7245                                 counterparty_cur_commitment_point,
7246                                 counterparty_prev_commitment_point,
7247                                 counterparty_node_id,
7248
7249                                 counterparty_shutdown_scriptpubkey,
7250
7251                                 commitment_secrets,
7252
7253                                 channel_update_status,
7254                                 closing_signed_in_flight: false,
7255
7256                                 announcement_sigs,
7257
7258                                 #[cfg(any(test, fuzzing))]
7259                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7260                                 #[cfg(any(test, fuzzing))]
7261                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7262
7263                                 workaround_lnd_bug_4006: None,
7264                                 sent_message_awaiting_response: None,
7265
7266                                 latest_inbound_scid_alias,
7267                                 // Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if it's missing.
7268                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7269
7270                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7271                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7272
7273                                 #[cfg(any(test, fuzzing))]
7274                                 historical_inbound_htlc_fulfills,
7275
7276                                 channel_type: channel_type.unwrap(),
7277                                 channel_keys_id,
7278
7279                                 pending_monitor_updates: pending_monitor_updates.unwrap(),
7280                         }
7281                 })
7282         }
7283 }
7284
7285 #[cfg(test)]
7286 mod tests {
7287         use std::cmp;
7288         use bitcoin::blockdata::script::{Script, Builder};
7289         use bitcoin::blockdata::transaction::{Transaction, TxOut};
7290         use bitcoin::blockdata::constants::genesis_block;
7291         use bitcoin::blockdata::opcodes;
7292         use bitcoin::network::constants::Network;
7293         use hex;
7294         use crate::ln::PaymentHash;
7295         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7296         #[cfg(anchors)]
7297         use crate::ln::channel::InitFeatures;
7298         use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7299         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7300         use crate::ln::features::ChannelTypeFeatures;
7301         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7302         use crate::ln::script::ShutdownScript;
7303         use crate::ln::chan_utils;
7304         use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7305         use crate::chain::BestBlock;
7306         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7307         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7308         use crate::chain::transaction::OutPoint;
7309         use crate::routing::router::Path;
7310         use crate::util::config::UserConfig;
7311         use crate::util::enforcing_trait_impls::EnforcingSigner;
7312         use crate::util::errors::APIError;
7313         use crate::util::test_utils;
7314         use crate::util::test_utils::OnGetShutdownScriptpubkey;
7315         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7316         use bitcoin::secp256k1::ffi::Signature as FFISignature;
7317         use bitcoin::secp256k1::{SecretKey,PublicKey};
7318         use bitcoin::hashes::sha256::Hash as Sha256;
7319         use bitcoin::hashes::Hash;
7320         use bitcoin::hash_types::WPubkeyHash;
7321         use bitcoin::PackedLockTime;
7322         use bitcoin::util::address::WitnessVersion;
7323         use crate::prelude::*;
7324
7325         struct TestFeeEstimator {
7326                 fee_est: u32
7327         }
7328         impl FeeEstimator for TestFeeEstimator {
7329                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7330                         self.fee_est
7331                 }
7332         }
7333
7334         #[test]
7335         fn test_max_funding_satoshis_no_wumbo() {
7336                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7337                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7338                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7339         }
7340
7341         #[test]
7342         fn test_no_fee_check_overflow() {
7343                 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7344                 // arithmetic, causing a panic with debug assertions enabled.
7345                 let fee_est = TestFeeEstimator { fee_est: 42 };
7346                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7347                 assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator,
7348                         u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
7349         }
7350
7351         struct Keys {
7352                 signer: InMemorySigner,
7353         }
7354
7355         impl EntropySource for Keys {
7356                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7357         }
7358
7359         impl SignerProvider for Keys {
7360                 type Signer = InMemorySigner;
7361
7362                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7363                         self.signer.channel_keys_id()
7364                 }
7365
7366                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7367                         self.signer.clone()
7368                 }
7369
7370                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
7371
7372                 fn get_destination_script(&self) -> Result<Script, ()> {
7373                         let secp_ctx = Secp256k1::signing_only();
7374                         let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7375                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7376                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
7377                 }
7378
7379                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7380                         let secp_ctx = Secp256k1::signing_only();
7381                         let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7382                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7383                 }
7384         }
7385
7386         #[cfg(not(feature = "grind_signatures"))]
7387         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7388                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
7389         }
7390
7391         #[test]
7392         fn upfront_shutdown_script_incompatibility() {
7393                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7394                 let non_v0_segwit_shutdown_script =
7395                         ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7396
7397                 let seed = [42; 32];
7398                 let network = Network::Testnet;
7399                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7400                 keys_provider.expect(OnGetShutdownScriptpubkey {
7401                         returns: non_v0_segwit_shutdown_script.clone(),
7402                 });
7403
7404                 let secp_ctx = Secp256k1::new();
7405                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7406                 let config = UserConfig::default();
7407                 match OutboundV1Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7408                         Err(APIError::IncompatibleShutdownScript { script }) => {
7409                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7410                         },
7411                         Err(e) => panic!("Unexpected error: {:?}", e),
7412                         Ok(_) => panic!("Expected error"),
7413                 }
7414         }
7415
7416         // Check that, during channel creation, we use the same feerate in the open channel message
7417         // as we do in the Channel object creation itself.
7418         #[test]
7419         fn test_open_channel_msg_fee() {
7420                 let original_fee = 253;
7421                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7422                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7423                 let secp_ctx = Secp256k1::new();
7424                 let seed = [42; 32];
7425                 let network = Network::Testnet;
7426                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7427
7428                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7429                 let config = UserConfig::default();
7430                 let node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7431
7432                 // Now change the fee so we can check that the fee in the open_channel message is the
7433                 // same as the old fee.
7434                 fee_est.fee_est = 500;
7435                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7436                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7437         }
7438
7439         #[test]
7440         fn test_holder_vs_counterparty_dust_limit() {
7441                 // Test that when calculating the local and remote commitment transaction fees, the correct
7442                 // dust limits are used.
7443                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7444                 let secp_ctx = Secp256k1::new();
7445                 let seed = [42; 32];
7446                 let network = Network::Testnet;
7447                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7448                 let logger = test_utils::TestLogger::new();
7449
7450                 // Go through the flow of opening a channel between two nodes, making sure
7451                 // they have different dust limits.
7452
7453                 // Create Node A's channel pointing to Node B's pubkey
7454                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7455                 let config = UserConfig::default();
7456                 let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7457
7458                 // Create Node B's channel by receiving Node A's open_channel message
7459                 // Make sure A's dust limit is as we expect.
7460                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7461                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7462                 let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
7463
7464                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
7465                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
7466                 accept_channel_msg.dust_limit_satoshis = 546;
7467                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
7468                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
7469
7470                 // Put some inbound and outbound HTLCs in A's channel.
7471                 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
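                     // Editor's arithmetic sketch (assuming the pre-anchors 663/703 WU HTLC weights):
                     // at 15000 sat/kW, the tightest threshold on B's commitment is the success path,
                     // 15000 * 703 / 1000 + 546 = 11_091 sat, while the tightest on A's is the timeout
                     // path, 15000 * 663 / 1000 + 1560 = 11_505 sat, so 11_092 sat is dust on A's
                     // commitment transaction but not on B's.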
7472                 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
7473                         htlc_id: 0,
7474                         amount_msat: htlc_amount_msat,
7475                         payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
7476                         cltv_expiry: 300000000,
7477                         state: InboundHTLCState::Committed,
7478                 });
7479
7480                 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
7481                         htlc_id: 1,
7482                         amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
7483                         payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
7484                         cltv_expiry: 200000000,
7485                         state: OutboundHTLCState::Committed,
7486                         source: HTLCSource::OutboundRoute {
7487                                 path: Path { hops: Vec::new(), blinded_tail: None },
7488                                 session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7489                                 first_hop_htlc_msat: 548,
7490                                 payment_id: PaymentId([42; 32]),
7491                         }
7492                 });
7493
7494                 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
7495                 // the dust limit check.
7496                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7497                 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7498                 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors());
7499                 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
7500
7501                 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
7502                 // of the HTLCs are seen to be above the dust limit.
7503                 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
7504                 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors());
7505                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7506                 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7507                 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
7508         }
7509
7510         #[test]
7511         fn test_timeout_vs_success_htlc_dust_limit() {
7512                 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
7513                 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
7514                 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
7515                 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
7516                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
7517                 let secp_ctx = Secp256k1::new();
7518                 let seed = [42; 32];
7519                 let network = Network::Testnet;
7520                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7521
7522                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7523                 let config = UserConfig::default();
7524                 let mut chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7525
7526                 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors());
7527                 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors());
7528
7529                 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
7530                 // counted as dust when it shouldn't be.
7531                 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
7532                 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7533                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7534                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
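                     // Worked numbers (editor's sketch, assuming the pre-anchors BOLT 3 weights of
                     // 663 WU for HTLC-timeout / 703 WU for HTLC-success and the 354 sat default
                     // holder dust limit): the amount above is ((253 * 663 / 1000) + 354 + 1) * 1000
                     // = 522_000 msat, just past the timeout-path dust cutoff of 521 sat.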
7535
7536                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
7537                 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
7538                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7539                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7540                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7541
7542                 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
7543
7544                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
7545                 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
7546                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7547                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7548                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7549
7550                 // If swapped: this HTLC would be counted as dust when it shouldn't be.
7551                 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
7552                 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7553                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7554                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
7555         }
7556
7557         #[test]
7558         fn channel_reestablish_no_updates() {
7559                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7560                 let logger = test_utils::TestLogger::new();
7561                 let secp_ctx = Secp256k1::new();
7562                 let seed = [42; 32];
7563                 let network = Network::Testnet;
7564                 let best_block = BestBlock::from_network(network);
7565                 let chain_hash = best_block.block_hash();
7566                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7567
7568                 // Go through the flow of opening a channel between two nodes.
7569
7570                 // Create Node A's channel pointing to Node B's pubkey
7571                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7572                 let config = UserConfig::default();
7573                 let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7574
7575                 // Create Node B's channel by receiving Node A's open_channel message
7576                 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
7577                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7578                 let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
7579
7580                 // Node B --> Node A: accept channel
7581                 let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
7582                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
7583
7584                 // Node A --> Node B: funding created
7585                 let output_script = node_a_chan.context.get_funding_redeemscript();
7586                 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
7587                         value: 10000000, script_pubkey: output_script.clone(),
7588                 }]};
7589                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
7590                 let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
7591                 let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
7592
7593                 // Node B --> Node A: funding signed
7594                 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
7595
7596                 // Now disconnect the two nodes and check that the commitment point in
7597                 // Node B's channel_reestablish message is sane.
7598                 node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
7599                 let msg = node_b_chan.get_channel_reestablish(&&logger);
7600                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
7601                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
7602                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
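                     // (Editor's note: per BOLT 2 these values mean only the initial commitment
                     // transaction has been received (`next_local_commitment_number == 1`) and no
                     // revocation has been received yet (`next_remote_commitment_number == 0`),
                     // hence the all-zero last per-commitment secret.)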
7603
7604                 // Check that the commitment point in Node A's channel_reestablish message
7605                 // is sane.
7606                 node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
7607                 let msg = node_a_chan.get_channel_reestablish(&&logger);
7608                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
7609                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
7610                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
7611         }
7612
7613         #[test]
7614         fn test_configured_holder_max_htlc_value_in_flight() {
7615                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7616                 let logger = test_utils::TestLogger::new();
7617                 let secp_ctx = Secp256k1::new();
7618                 let seed = [42; 32];
7619                 let network = Network::Testnet;
7620                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7621                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7622                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7623
7624                 let mut config_2_percent = UserConfig::default();
7625                 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
7626                 let mut config_99_percent = UserConfig::default();
7627                 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
7628                 let mut config_0_percent = UserConfig::default();
7629                 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
7630                 let mut config_101_percent = UserConfig::default();
7631                 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
7632
7633                 // Test that `new_outbound` creates a channel with the correct value for
7634                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
7635                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
7636                 let chan_1 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
7637                 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
7638                 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
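                     // e.g. with the 10_000_000 sat channel above: 10_000_000_000 msat * 0.02
                     // = 200_000_000 msat of inbound HTLC value in flight (editor's arithmetic).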
7639
7640                 // Test with the upper bound - 1 of valid values (99%).
7641                 let chan_2 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
7642                 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
7643                 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
7644
7645                 let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
7646
7647                 // Test that `new_from_req` creates a channel with the correct value for
7648                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
7649                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
7650                 let chan_3 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
7651                 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
7652                 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
7653
7654                 // Test with the upper bound - 1 of valid values (99%).
7655                 let chan_4 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
7656                 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
7657                 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
7658
7659                 // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
7660                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
7661                 let chan_5 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
7662                 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
7663                 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
7664
7665                 // Test that `new_outbound` uses the upper bound of the configurable percentage values
7666                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
7667                 // than 100.
7668                 let chan_6 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
7669                 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
7670                 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
7671
7672                 // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
7673                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
7674                 let chan_7 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
7675                 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
7676                 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
7677
7678                 // Test that `new_from_req` uses the upper bound of the configurable percentage values
7679                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
7680                 // than 100.
7681                 let chan_8 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
7682                 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
7683                 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
7684         }
7685
7686         #[test]
7687         fn test_configured_holder_selected_channel_reserve_satoshis() {
7688
7689                 // Test that `new_outbound` and `new_from_req` create a channel with the correct
7690                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
7691                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
7692
7693                 // Test with valid but unreasonably high channel reserves
7694                 // The requesting and accepting parties request 49%-49% and 60%-30% channel reserves respectively
7695                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
7696                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
7697
7698                 // Test with a calculated channel reserve below the lower bound,
7699                 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7700                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
7701
7702                 // Test with invalid channel reserves, where the sum of both is greater than or
7703                 // equal to the channel value
7704                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
7705                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
7706         }
7707
7708         fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
7709                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
7710                 let logger = test_utils::TestLogger::new();
7711                 let secp_ctx = Secp256k1::new();
7712                 let seed = [42; 32];
7713                 let network = Network::Testnet;
7714                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7715                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7716                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7717
7718
7719                 let mut outbound_node_config = UserConfig::default();
7720                 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
7721                 let chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
7722
7723                 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
7724                 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
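                     // Editor's sketch of the floor case: with a 100_000 sat channel and 0.002%
                     // configured, 100_000 * 0.00002 = 2 sat, which is clamped up to
                     // MIN_THEIR_CHAN_RESERVE_SATOSHIS (1_000 sat, assuming the current constant).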
7725
7726                 let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
7727                 let mut inbound_node_config = UserConfig::default();
7728                 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
7729
7730                 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
7731                         let chan_inbound_node = InboundV1Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
7732
7733                         let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
7734
7735                         assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
7736                         assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
7737                 } else {
7738                         // Channel negotiation should fail: the reserves sum to at least the channel value.
7739                         let result = InboundV1Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
7740                         assert!(result.is_err());
7741                 }
7742         }
7743
7744         #[test]
7745         fn channel_update() {
7746                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7747                 let secp_ctx = Secp256k1::new();
7748                 let seed = [42; 32];
7749                 let network = Network::Testnet;
7750                 let chain_hash = genesis_block(network).header.block_hash();
7751                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7752
7753                 // Create a channel.
7754                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7755                 let config = UserConfig::default();
7756                 let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7757                 assert!(node_a_chan.context.counterparty_forwarding_info.is_none());
7758                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default
7759                 assert!(node_a_chan.context.counterparty_forwarding_info().is_none());
7760
7761                 // Make sure that receiving a channel update will update the Channel as expected.
7762                 let update = ChannelUpdate {
7763                         contents: UnsignedChannelUpdate {
7764                                 chain_hash,
7765                                 short_channel_id: 0,
7766                                 timestamp: 0,
7767                                 flags: 0,
7768                                 cltv_expiry_delta: 100,
7769                                 htlc_minimum_msat: 5,
7770                                 htlc_maximum_msat: MAX_VALUE_MSAT,
7771                                 fee_base_msat: 110,
7772                                 fee_proportional_millionths: 11,
7773                                 excess_data: Vec::new(),
7774                         },
7775                         signature: Signature::from(unsafe { FFISignature::new() })
7776                 };
7777                 node_a_chan.channel_update(&update).unwrap();
7778
7779                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
7780                 // change our official htlc_minimum_msat.
7781                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
7782                 match node_a_chan.context.counterparty_forwarding_info() {
7783                         Some(info) => {
7784                                 assert_eq!(info.cltv_expiry_delta, 100);
7785                                 assert_eq!(info.fee_base_msat, 110);
7786                                 assert_eq!(info.fee_proportional_millionths, 11);
7787                         },
7788                         None => panic!("expected counterparty forwarding info to be Some")
7789                 }
7790         }
7791
7792         #[cfg(feature = "_test_vectors")]
7793         #[test]
7794         fn outbound_commitment_test() {
7795                 use bitcoin::util::sighash;
7796                 use bitcoin::consensus::encode::serialize;
7797                 use bitcoin::blockdata::transaction::EcdsaSighashType;
7798                 use bitcoin::hashes::hex::FromHex;
7799                 use bitcoin::hash_types::Txid;
7800                 use bitcoin::secp256k1::Message;
7801                 use crate::sign::EcdsaChannelSigner;
7802                 use crate::ln::PaymentPreimage;
7803                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
7804                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
7805                 use crate::util::logger::Logger;
7806                 use crate::sync::Arc;
7807
7808                 // Test vectors from BOLT 3 Appendices C and F (anchors):
7809                 let feeest = TestFeeEstimator{fee_est: 15000};
7810                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
7811                 let secp_ctx = Secp256k1::new();
7812
7813                 let mut signer = InMemorySigner::new(
7814                         &secp_ctx,
7815                         SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
7816                         SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7817                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
7818                         SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
7819                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
7820
7821                         // These aren't set in the test vectors:
7822                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
7823                         10_000_000,
7824                         [0; 32],
7825                         [0; 32],
7826                 );
7827
7828                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
7829                                 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
7830                 let keys_provider = Keys { signer: signer.clone() };
7831
7832                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7833                 let mut config = UserConfig::default();
7834                 config.channel_handshake_config.announced_channel = false;
7835                 let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
7836                 chan.context.holder_dust_limit_satoshis = 546;
7837                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in by accept_channel
7838
7839                 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
7840
7841                 let counterparty_pubkeys = ChannelPublicKeys {
7842                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
7843                         revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
7844                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
7845                         delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
7846                         htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
7847                 };
7848                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
7849                         CounterpartyChannelTransactionParameters {
7850                                 pubkeys: counterparty_pubkeys.clone(),
7851                                 selected_contest_delay: 144
7852                         });
7853                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
7854                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
7855
7856                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
7857                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
7858
7859                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
7860                            hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
7861
7862                 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
7863                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
7864
7865                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
7866                 // derived from a commitment_seed, so instead we copy it here and call
7867                 // build_commitment_transaction.
7868                 let delayed_payment_base = &chan.context.holder_signer.pubkeys().delayed_payment_basepoint;
7869                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
7870                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
7871                 let htlc_basepoint = &chan.context.holder_signer.pubkeys().htlc_basepoint;
7872                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
7873
7874                 macro_rules! test_commitment {
7875                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
7876                                 chan.context.channel_transaction_parameters.opt_anchors = None;
7877                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*);
7878                         };
7879                 }
7880
7881                 macro_rules! test_commitment_with_anchors {
7882                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
7883                                 chan.context.channel_transaction_parameters.opt_anchors = Some(());
7884                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*);
7885                         };
7886                 }
7887
7888                 macro_rules! test_commitment_common {
7889                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
7890                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
7891                         } ) => { {
7892                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
7893                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
7894
7895                                         let htlcs = commitment_stats.htlcs_included.drain(..)
7896                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
7897                                                 .collect();
7898                                         (commitment_stats.tx, htlcs)
7899                                 };
7900                                 let trusted_tx = commitment_tx.trust();
7901                                 let unsigned_tx = trusted_tx.built_transaction();
7902                                 let redeemscript = chan.context.get_funding_redeemscript();
7903                                 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
7904                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
7905                                 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
7906                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
7907
7908                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
7909                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
7910                                 let mut counterparty_htlc_sigs = Vec::new();
7911                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
7912                                 $({
7913                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
7914                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
7915                                         counterparty_htlc_sigs.push(remote_signature);
7916                                 })*
7917                                 assert_eq!(htlcs.len(), per_htlc.len());
7918
7919                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7920                                         commitment_tx.clone(),
7921                                         counterparty_signature,
7922                                         counterparty_htlc_sigs,
7923                                         &chan.context.holder_signer.pubkeys().funding_pubkey,
7924                                         chan.context.counterparty_funding_pubkey()
7925                                 );
7926                                 let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
7927                                 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
7928
7929                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
7930                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
7931                                 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
7932
7933                                 // ((htlc, counterparty_sig), (index, holder_sig))
7934                                 let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
7935
7936                                 $({
7937                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
7938                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
7939
7940                                         let ref htlc = htlcs[$htlc_idx];
7941                                         let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
7942                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
7943                                                 &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
7944                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
7945                                         let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
7946                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
7947                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
7948
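                                              // For received (!offered) HTLCs the holder needs the preimage to build the
                                              // HTLC-success transaction. The BOLT 3 vectors derive payment hash i as
                                              // SHA256([i; 32]), so a search over that small space recovers it.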
7949                                         let mut preimage: Option<PaymentPreimage> = None;
7950                                         if !htlc.offered {
7951                                                 for i in 0..5 {
7952                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
7953                                                         if out == htlc.payment_hash {
7954                                                                 preimage = Some(PaymentPreimage([i; 32]));
7955                                                         }
7956                                                 }
7957
7958                                                 assert!(preimage.is_some());
7959                                         }
7960
7961                                         let htlc_sig = htlc_sig_iter.next().unwrap();
7962                                         let num_anchors = if $opt_anchors { 2 } else { 0 };
7963                                         assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
7964
7965                                         let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
7966                                         assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
7967                                         let index = (htlc_sig.1).0;
7968                                         let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable();
7969                                         let trusted_tx = holder_commitment_tx.trust();
7970                                         log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))));
7971                                         assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
7972                                                         hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
7973                                 })*
7974                                 assert!(htlc_sig_iter.next().is_none());
7975                         } }
7976                 }
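                      // Taken together, each test_commitment!/test_commitment_with_anchors! invocation
                      // below replays one BOLT 3 test vector: it verifies the hard-coded counterparty
                      // commitment signature against the funding redeemscript, countersigns with the
                      // holder signer, asserts the fully-signed commitment transaction serializes to
                      // the expected hex, and then rebuilds and checks every HTLC transaction and
                      // signature against the vector as well.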
7977
7978                 // anchors: simple commitment tx with no HTLCs and single anchor
7979                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
7980                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
7981                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7982
7983                 // simple commitment tx with no HTLCs
7984                 chan.context.value_to_self_msat = 7000000000;
7985
7986                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
7987                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
7988                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7989
7990                 // anchors: simple commitment tx with no HTLCs
7991                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
7992                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
7993                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7994
7995                 chan.context.pending_inbound_htlcs.push({
7996                         let mut out = InboundHTLCOutput{
7997                                 htlc_id: 0,
7998                                 amount_msat: 1000000,
7999                                 cltv_expiry: 500,
8000                                 payment_hash: PaymentHash([0; 32]),
8001                                 state: InboundHTLCState::Committed,
8002                         };
8003                         out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8004                         out
8005                 });
8006                 chan.context.pending_inbound_htlcs.push({
8007                         let mut out = InboundHTLCOutput{
8008                                 htlc_id: 1,
8009                                 amount_msat: 2000000,
8010                                 cltv_expiry: 501,
8011                                 payment_hash: PaymentHash([0; 32]),
8012                                 state: InboundHTLCState::Committed,
8013                         };
8014                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8015                         out
8016                 });
8017                 chan.context.pending_outbound_htlcs.push({
8018                         let mut out = OutboundHTLCOutput{
8019                                 htlc_id: 2,
8020                                 amount_msat: 2000000,
8021                                 cltv_expiry: 502,
8022                                 payment_hash: PaymentHash([0; 32]),
8023                                 state: OutboundHTLCState::Committed,
8024                                 source: HTLCSource::dummy(),
8025                         };
8026                         out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8027                         out
8028                 });
8029                 chan.context.pending_outbound_htlcs.push({
8030                         let mut out = OutboundHTLCOutput{
8031                                 htlc_id: 3,
8032                                 amount_msat: 3000000,
8033                                 cltv_expiry: 503,
8034                                 payment_hash: PaymentHash([0; 32]),
8035                                 state: OutboundHTLCState::Committed,
8036                                 source: HTLCSource::dummy(),
8037                         };
8038                         out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8039                         out
8040                 });
8041                 chan.context.pending_inbound_htlcs.push({
8042                         let mut out = InboundHTLCOutput{
8043                                 htlc_id: 4,
8044                                 amount_msat: 4000000,
8045                                 cltv_expiry: 504,
8046                                 payment_hash: PaymentHash([0; 32]),
8047                                 state: InboundHTLCState::Committed,
8048                         };
8049                         out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8050                         out
8051                 });
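                      // The five HTLCs above reproduce the BOLT 3 Appendix C state: received HTLCs 0, 1
                      // and 4 (1,000,000 / 2,000,000 / 4,000,000 msat) and offered HTLCs 2 and 3
                      // (2,000,000 / 3,000,000 msat), with payment hash i equal to SHA256([i; 32]).
                      // A minimal sanity check of that construction (an addition for illustration, not
                      // part of the upstream vectors): the first received HTLC's hash must match the
                      // all-zero preimage the macro later recovers.
                      assert_eq!(chan.context.pending_inbound_htlcs[0].payment_hash,
                              PaymentHash(Sha256::hash(&[0; 32]).into_inner()));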
8052
8053                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8054                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8055                 chan.context.feerate_per_kw = 0;
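                      // At a feerate of zero nothing is trimmed: even the smallest (1,000,000 msat)
                      // HTLC clears the 546 sat dust limit, so all five HTLCs appear as outputs.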
8056
8057                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8058                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8059                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8060
8061                                   { 0,
8062                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8063                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8064                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8065
8066                                   { 1,
8067                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8068                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8069                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8070
8071                                   { 2,
8072                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8073                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8074                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8075
8076                                   { 3,
8077                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8078                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8079                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8080
8081                                   { 4,
8082                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8083                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8084                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8085                 } );
8086
8087                 // commitment tx with seven outputs untrimmed (maximum feerate)
8088                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8089                 chan.context.feerate_per_kw = 647;
8090
8091                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8092                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8093                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8094
8095                                   { 0,
8096                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8097                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8098                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8099
8100                                   { 1,
8101                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8102                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8103                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8104
8105                                   { 2,
8106                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8107                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8108                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8109
8110                                   { 3,
8111                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8112                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8113                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8114
8115                                   { 4,
8116                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8117                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8118                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8119                 } );
8120
8121                 // commitment tx with six outputs untrimmed (minimum feerate)
8122                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8123                 chan.context.feerate_per_kw = 648;
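                      // One more sat/kw (647 -> 648) and the 1,000,000 msat received HTLC no longer
                      // exceeds the dust limit plus the HTLC-success fee, so it is trimmed: seven
                      // outputs become six.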
8124
8125                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8126                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8127                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8128
8129                                   { 0,
8130                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8131                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8132                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8133
8134                                   { 1,
8135                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8136                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8137                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8138
8139                                   { 2,
8140                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8141                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8142                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8143
8144                                   { 3,
8145                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8146                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8147                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8148                 } );
8149
8150                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8151                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8152                 chan.context.feerate_per_kw = 645;
8153                 chan.context.holder_dust_limit_satoshis = 1001;
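                      // Raising the holder dust limit above 1000 sat trims the 1,000,000 msat HTLC
                      // outright in the anchors variant, independent of any HTLC fee accounting.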
8154
8155                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8156                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8157                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8158
8159                                   { 0,
8160                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8161                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8162                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8163
8164                                   { 1,
8165                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8166                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8167                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8168
8169                                   { 2,
8170                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8171                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8172                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8173
8174                                   { 3,
8175                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8176                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8177                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8178                 } );
8179
8180                 // commitment tx with six outputs untrimmed (maximum feerate)
8181                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8182                 chan.context.feerate_per_kw = 2069;
8183                 chan.context.holder_dust_limit_satoshis = 546;
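                      // Restore the 546 sat dust limit used by the non-anchor BOLT 3 vectors.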
8184
8185                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8186                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8187                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8188
8189                                   { 0,
8190                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8191                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8192                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8193
8194                                   { 1,
8195                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8196                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8197                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8198
8199                                   { 2,
8200                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8201                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8202                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8203
8204                                   { 3,
8205                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8206                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8207                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8208                 } );
8209
8210                 // commitment tx with five outputs untrimmed (minimum feerate)
8211                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8212                 chan.context.feerate_per_kw = 2070;
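                      // At 2070 sat/kw the received 2,000,000 msat HTLC (id 1) drops below the dust
                      // limit plus the HTLC-success fee and is trimmed, while the cheaper HTLC-timeout
                      // fee keeps the offered 2,000,000 msat HTLC (id 2) alive: six outputs become five.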
8213
8214                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8215                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8216                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8217
8218                                   { 0,
8219                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8220                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8221                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8222
8223                                   { 1,
8224                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8225                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8226                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8227
8228                                   { 2,
8229                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8230                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8231                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8232                 } );
8233
8234                 // commitment tx with five outputs untrimmed (maximum feerate)
8235                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8236                 chan.context.feerate_per_kw = 2194;
8237
8238                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8239                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8240                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8241
8242                                   { 0,
8243                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8244                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8245                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8246
8247                                   { 1,
8248                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8249                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8250                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8251
8252                                   { 2,
8253                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8254                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8255                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8256                 } );
8257
8258                 // commitment tx with four outputs untrimmed (minimum feerate)
8259                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8260                 chan.context.feerate_per_kw = 2195;
8261
8262                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8263                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8264                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8265
8266                                   { 0,
8267                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8268                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8269                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8270
8271                                   { 1,
8272                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8273                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8274                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8275                 } );
8276
8277                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8278                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8279                 chan.context.feerate_per_kw = 2185;
8280                 chan.context.holder_dust_limit_satoshis = 2001;
8281
8282                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8283                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8284                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8285
8286                                   { 0,
8287                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8288                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8289                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8290
8291                                   { 1,
8292                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8293                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8294                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8295                 } );
8296
8297                 // commitment tx with four outputs untrimmed (maximum feerate)
8298                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8299                 chan.context.feerate_per_kw = 3702;
8300                 chan.context.holder_dust_limit_satoshis = 546;
8301
8302                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8303                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8304                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8305
8306                                   { 0,
8307                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8308                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8309                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8310
8311                                   { 1,
8312                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8313                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8314                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8315                 } );
8316
8317                 // commitment tx with three outputs untrimmed (minimum feerate)
8318                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8319                 chan.context.feerate_per_kw = 3703;
8320
8321                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8322                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8323                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8324
8325                                   { 0,
8326                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8327                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8328                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8329                 } );
8330
8331                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8332                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8333                 chan.context.feerate_per_kw = 3687;
8334                 chan.context.holder_dust_limit_satoshis = 3001;
8335
8336                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8337                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8338                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8339
8340                                   { 0,
8341                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8342                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8343                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8344                 } );
8345
8346                 // commitment tx with three outputs untrimmed (maximum feerate)
8347                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8348                 chan.context.feerate_per_kw = 4914;
8349                 chan.context.holder_dust_limit_satoshis = 546;
8350
8351                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8352                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8353                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8354
8355                                   { 0,
8356                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8357                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8358                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8359                 } );
8360
8361                 // commitment tx with two outputs untrimmed (minimum feerate)
8362                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8363                 chan.context.feerate_per_kw = 4915;
8364                 chan.context.holder_dust_limit_satoshis = 546;
8365
8366                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8367                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8368                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8369
8370                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8371                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8372                 chan.context.feerate_per_kw = 4894;
8373                 chan.context.holder_dust_limit_satoshis = 4001;
8374
8375                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8376                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8377                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8378
8379                 // commitment tx with two outputs untrimmed (maximum feerate)
8380                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8381                 chan.context.feerate_per_kw = 9651180;
8382                 chan.context.holder_dust_limit_satoshis = 546;
8383
8384                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8385                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8386                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8387
8388                 // commitment tx with one output untrimmed (minimum feerate)
8389                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8390                 chan.context.feerate_per_kw = 9651181;
8391
8392                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8393                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8394                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8395
8396                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8397                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8398                 chan.context.feerate_per_kw = 6216010;
8399                 chan.context.holder_dust_limit_satoshis = 4001;
8400
8401                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8402                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8403                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8404
8405                 // commitment tx with fee greater than funder amount
8406                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8407                 chan.context.feerate_per_kw = 9651936;
8408                 chan.context.holder_dust_limit_satoshis = 546;
8409
8410                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8411                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8412                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8413
8414                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8415                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8416                 chan.context.feerate_per_kw = 253;
8417                 chan.context.pending_inbound_htlcs.clear();
8418                 chan.context.pending_inbound_htlcs.push({
8419                         let mut out = InboundHTLCOutput{
8420                                 htlc_id: 1,
8421                                 amount_msat: 2000000,
8422                                 cltv_expiry: 501,
8423                                 payment_hash: PaymentHash([0; 32]),
8424                                 state: InboundHTLCState::Committed,
8425                         };
8426                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8427                         out
8428                 });
8429                 chan.context.pending_outbound_htlcs.clear();
8430                 chan.context.pending_outbound_htlcs.push({
8431                         let mut out = OutboundHTLCOutput{
8432                                 htlc_id: 6,
8433                                 amount_msat: 5000001,
8434                                 cltv_expiry: 506,
8435                                 payment_hash: PaymentHash([0; 32]),
8436                                 state: OutboundHTLCState::Committed,
8437                                 source: HTLCSource::dummy(),
8438                         };
8439                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8440                         out
8441                 });
8442                 chan.context.pending_outbound_htlcs.push({
8443                         let mut out = OutboundHTLCOutput{
8444                                 htlc_id: 5,
8445                                 amount_msat: 5000000,
8446                                 cltv_expiry: 505,
8447                                 payment_hash: PaymentHash([0; 32]),
8448                                 state: OutboundHTLCState::Committed,
8449                                 source: HTLCSource::dummy(),
8450                         };
8451                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8452                         out
8453                 });
8454
8455                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8456                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8457                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8458
8459                                   { 0,
8460                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8461                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8462                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8463                                   { 1,
8464                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8465                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8466                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8467                                   { 2,
8468                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8469                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8470                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8471                 } );
8472
8473                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8474                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8475                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8476
8477                                   { 0,
8478                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8479                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8480                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8481                                   { 1,
8482                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8483                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8484                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8485                                   { 2,
8486                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8487                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8488                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8489                 } );
8490         }
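
        // A minimal illustrative sketch (not LDK API) of the BOLT 3 "trimmed outputs" rule the
        // feerate/dust-limit sweeps above exercise: a non-anchor offered HTLC only gets its own
        // commitment output when its value covers the holder's dust limit plus the fee for the
        // second-stage HTLC-timeout transaction (assumed weight of 663 per BOLT 3; received
        // HTLCs use the HTLC-success weight of 703 instead).
        #[test]
        fn offered_htlc_trimming_sketch() {
                fn offered_htlc_has_output(amount_msat: u64, feerate_per_kw: u32, dust_limit_satoshis: u64) -> bool {
                        // Second-stage fee in satoshis, rounded down as in the spec.
                        let htlc_timeout_fee_sat = feerate_per_kw as u64 * 663 / 1000;
                        amount_msat / 1000 >= dust_limit_satoshis + htlc_timeout_fee_sat
                }
                // At a zero feerate only the dust limit matters...
                assert!(offered_htlc_has_output(546_000, 0, 546));
                assert!(!offered_htlc_has_output(545_999, 0, 546));
                // ...while raising the feerate trims outputs that were previously untrimmed.
                assert!(!offered_htlc_has_output(546_000, 1000, 546));
        }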
8491
8492         #[test]
8493         fn test_per_commitment_secret_gen() {
8494                 // Test vectors from BOLT 3 Appendix D:
8495
8496                 let mut seed = [0; 32];
8497                 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8498                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8499                            hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
8500
8501                 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8502                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8503                            hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
8504
8505                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8506                            hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8507
8508                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8509                            hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
8510
8511                 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8512                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8513                            hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8514         }
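
        // An illustrative re-implementation (a sketch, not part of LDK's API) of the BOLT 3
        // Appendix D derivation that `chan_utils::build_commitment_secret` performs: starting
        // from the seed, for each set bit B of the index (from bit 47 down to bit 0), flip bit B
        // of the running value and replace it with its SHA256. Checked against the library here.
        #[test]
        fn per_commitment_secret_sketch() {
                fn derive(seed: &[u8; 32], idx: u64) -> [u8; 32] {
                        let mut res = *seed;
                        for i in 0..48 {
                                let bitpos = 47 - i;
                                if idx & (1 << bitpos) == (1 << bitpos) {
                                        res[bitpos / 8] ^= 1 << (bitpos & 7);
                                        res = Sha256::hash(&res).into_inner();
                                }
                        }
                        res
                }
                let seed = [0; 32];
                assert_eq!(derive(&seed, 281474976710655)[..],
                           chan_utils::build_commitment_secret(&seed, 281474976710655)[..]);
        }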
8515
8516         #[test]
8517         fn test_key_derivation() {
8518                 // Test vectors from BOLT 3 Appendix E:
8519                 let secp_ctx = Secp256k1::new();
8520
8521                 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8522                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8523
8524                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8525                 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8526
8527                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8528                 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
8529
8530                 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8531                                 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
8532
8533                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8534                                 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
8535
8536                 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8537                                 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
8538
8539                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8540                                 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8541         }
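
        // A sketch (not LDK API) of the BOLT 3 Appendix E tweak behind `derive_public_key`,
        // checked against the same vector as above: the derived key is
        // `basepoint + SHA256(per_commitment_point || basepoint) * G`. The revocation keys use a
        // different, doubly-blinded tweak and are not re-derived here.
        #[test]
        fn key_derivation_sketch() {
                use bitcoin::hashes::HashEngine;
                let secp_ctx = Secp256k1::new();
                let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
                let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
                let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
                let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);

                // tweak = SHA256(per_commitment_point || basepoint), interpreted as a scalar.
                let mut sha = Sha256::engine();
                sha.input(&per_commitment_point.serialize());
                sha.input(&base_point.serialize());
                let tweak = SecretKey::from_slice(&Sha256::from_engine(sha).into_inner()).unwrap();

                // derived = basepoint + tweak * G, matching the Appendix E vector asserted above.
                let derived = base_point.combine(&PublicKey::from_secret_key(&secp_ctx, &tweak)).unwrap();
                assert_eq!(derived.serialize()[..],
                           hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
        }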
8542
8543         #[test]
8544         fn test_zero_conf_channel_type_support() {
8545                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8546                 let secp_ctx = Secp256k1::new();
8547                 let seed = [42; 32];
8548                 let network = Network::Testnet;
8549                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8550                 let logger = test_utils::TestLogger::new();
8551
8552                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8553                 let config = UserConfig::default();
8554                 let node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider,
8555                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8556
8557                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8558                 channel_type_features.set_zero_conf_required();
8559
8560                 let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8561                 open_channel_msg.channel_type = Some(channel_type_features);
8562                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8563                 let res = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
8564                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
8565                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
8566                 assert!(res.is_ok());
8567         }
8568
8569         #[cfg(anchors)]
8570         #[test]
8571         fn test_supports_anchors_zero_htlc_tx_fee() {
8572                 // Tests that when both sides support and negotiate `anchors_zero_fee_htlc_tx`, it becomes
8573                 // the resulting `channel_type`.
8574                 let secp_ctx = Secp256k1::new();
8575                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8576                 let network = Network::Testnet;
8577                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8578                 let logger = test_utils::TestLogger::new();
8579
8580                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8581                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8582
8583                 let mut config = UserConfig::default();
8584                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
8585
8586                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
8587                 // both sides need to signal it.
8588                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8589                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8590                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
8591                         &config, 0, 42
8592                 ).unwrap();
8593                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
8594
8595                 let mut expected_channel_type = ChannelTypeFeatures::empty();
8596                 expected_channel_type.set_static_remote_key_required();
8597                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
8598
8599                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8600                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8601                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8602                 ).unwrap();
8603
8604                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8605                 let channel_b = InboundV1Channel::<EnforcingSigner>::new_from_req(
8606                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8607                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
8608                         &open_channel_msg, 7, &config, 0, &&logger, 42
8609                 ).unwrap();
8610
8611                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
8612                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
8613         }
8614
8615         #[cfg(anchors)]
8616         #[test]
8617         fn test_rejects_implicit_simple_anchors() {
8618                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
8619                 // each side's `InitFeatures`, it is rejected.
8620                 let secp_ctx = Secp256k1::new();
8621                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8622                 let network = Network::Testnet;
8623                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8624                 let logger = test_utils::TestLogger::new();
8625
8626                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8627                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8628
8629                 let config = UserConfig::default();
8630
8631                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
8632                 let static_remote_key_required: u64 = 1 << 12;
8633                 let simple_anchors_required: u64 = 1 << 20;
8634                 let raw_init_features = static_remote_key_required | simple_anchors_required;
8635                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
8636
8637                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8638                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8639                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8640                 ).unwrap();
8641
8642                 // Set `channel_type` to `None` to force the implicit feature negotiation.
8643                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8644                 open_channel_msg.channel_type = None;
8645
8646                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
8647                 // `static_remote_key`, B will fail the channel.
8648                 let channel_b = InboundV1Channel::<EnforcingSigner>::new_from_req(
8649                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8650                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
8651                         &open_channel_msg, 7, &config, 0, &&logger, 42
8652                 );
8653                 assert!(channel_b.is_err());
8654         }
8655
8656         #[cfg(anchors)]
8657         #[test]
8658         fn test_rejects_simple_anchors_channel_type() {
8659                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
8660                 // it is rejected.
8661                 let secp_ctx = Secp256k1::new();
8662                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8663                 let network = Network::Testnet;
8664                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8665                 let logger = test_utils::TestLogger::new();
8666
8667                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8668                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8669
8670                 let config = UserConfig::default();
8671
8672                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
8673                 let static_remote_key_required: u64 = 1 << 12;
8674                 let simple_anchors_required: u64 = 1 << 20;
8675                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
8676                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
8677                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
8678                 assert!(simple_anchors_init.requires_unknown_bits());
8679                 assert!(simple_anchors_channel_type.requires_unknown_bits());
8680
8681                 // First, we'll try to open a channel between A and B where A requests a channel type for
8682                 // the original `option_anchors` feature (with non-zero-fee HTLC transactions). This should
8683                 // be rejected by B as it's not supported by LDK.
8684                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8685                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8686                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8687                 ).unwrap();
8688
8689                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8690                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
8691
8692                 let res = InboundV1Channel::<EnforcingSigner>::new_from_req(
8693                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8694                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
8695                         &open_channel_msg, 7, &config, 0, &&logger, 42
8696                 );
8697                 assert!(res.is_err());
8698
8699                 // Then, we'll try to open another channel where A requests a channel type for
8700                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
8701                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
8702                 // LDK.
8703                 let mut channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8704                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
8705                         10000000, 100000, 42, &config, 0, 42
8706                 ).unwrap();
8707
8708                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8709
8710                 let channel_b = InboundV1Channel::<EnforcingSigner>::new_from_req(
8711                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8712                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
8713                         &open_channel_msg, 7, &config, 0, &&logger, 42
8714                 ).unwrap();
8715
8716                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
8717                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
8718
8719                 let res = channel_a.accept_channel(
8720                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
8721                 );
8722                 assert!(res.is_err());
8723         }
8724 }