Move outbound channel constructor into `OutboundV1Channel` impl
[rust-lightning] / lightning / src / ln / channel.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 use bitcoin::blockdata::script::{Script,Builder};
11 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
12 use bitcoin::util::sighash;
13 use bitcoin::consensus::encode;
14
15 use bitcoin::hashes::Hash;
16 use bitcoin::hashes::sha256::Hash as Sha256;
17 use bitcoin::hashes::sha256d::Hash as Sha256d;
18 use bitcoin::hash_types::{Txid, BlockHash};
19
20 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
21 use bitcoin::secp256k1::{PublicKey,SecretKey};
22 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
23 use bitcoin::secp256k1;
24
25 use crate::ln::{PaymentPreimage, PaymentHash};
26 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
27 use crate::ln::msgs;
28 use crate::ln::msgs::DecodeError;
29 use crate::ln::script::{self, ShutdownScript};
30 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
31 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
32 use crate::ln::chan_utils;
33 use crate::ln::onion_utils::HTLCFailReason;
34 use crate::chain::BestBlock;
35 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
36 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
37 use crate::chain::transaction::{OutPoint, TransactionData};
38 use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
39 use crate::events::ClosureReason;
40 use crate::routing::gossip::NodeId;
41 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
42 use crate::util::logger::Logger;
43 use crate::util::errors::APIError;
44 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
45 use crate::util::scid_utils::scid_from_parts;
46
47 use crate::io;
48 use crate::prelude::*;
49 use core::{cmp,mem,fmt};
50 use core::ops::Deref;
51 #[cfg(any(test, fuzzing, debug_assertions))]
52 use crate::sync::Mutex;
53 use bitcoin::hashes::hex::ToHex;
54
55 #[cfg(test)]
56 pub struct ChannelValueStat {
57         pub value_to_self_msat: u64,
58         pub channel_value_msat: u64,
59         pub channel_reserve_msat: u64,
60         pub pending_outbound_htlcs_amount_msat: u64,
61         pub pending_inbound_htlcs_amount_msat: u64,
62         pub holding_cell_outbound_amount_msat: u64,
63         pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
64         pub counterparty_dust_limit_msat: u64,
65 }
66
67 pub struct AvailableBalances {
68         /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
69         pub balance_msat: u64,
70         /// Total amount available for our counterparty to send to us.
71         pub inbound_capacity_msat: u64,
72         /// Total amount available for us to send to our counterparty.
73         pub outbound_capacity_msat: u64,
74         /// The maximum value we can assign to the next outbound HTLC
75         pub next_outbound_htlc_limit_msat: u64,
76         /// The minimum value we can assign to the next outbound HTLC
77         pub next_outbound_htlc_minimum_msat: u64,
78 }
79
80 #[derive(Debug, Clone, Copy, PartialEq)]
81 enum FeeUpdateState {
82         // Inbound states mirroring InboundHTLCState
83         RemoteAnnounced,
84         AwaitingRemoteRevokeToAnnounce,
85         // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
86         // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
87         // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
88         // the fee update anywhere, we can simply consider the fee update `Committed` immediately
89         // instead of setting it to AwaitingAnnouncedRemoteRevoke.
90
91         // Outbound state can only be `LocalAnnounced` or `Committed`
92         Outbound,
93 }
94
95 enum InboundHTLCRemovalReason {
96         FailRelay(msgs::OnionErrorPacket),
97         FailMalformed(([u8; 32], u16)),
98         Fulfill(PaymentPreimage),
99 }
100
101 enum InboundHTLCState {
102         /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
103         /// update_add_htlc message for this HTLC.
104         RemoteAnnounced(PendingHTLCStatus),
105         /// Included in a received commitment_signed message (implying we've
106         /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
107         /// state (see the example below). We have not yet included this HTLC in a
108         /// commitment_signed message because we are waiting on the remote's
109         /// aforementioned state revocation. One reason this missing remote RAA
110         /// (revoke_and_ack) blocks us from constructing a commitment_signed message
111         /// is because every time we create a new "state", i.e. every time we sign a
112         /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
113         /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
114         /// sent provided the per_commitment_point for our current commitment tx.
115         /// The other reason we should not send a commitment_signed without their RAA
116         /// is because their RAA serves to ACK our previous commitment_signed.
117         ///
118         /// Here's an example of how an HTLC could come to be in this state:
119         /// remote --> update_add_htlc(prev_htlc)   --> local
120         /// remote --> commitment_signed(prev_htlc) --> local
121         /// remote <-- revoke_and_ack               <-- local
122         /// remote <-- commitment_signed(prev_htlc) <-- local
123         /// [note that here, the remote does not respond with a RAA]
124         /// remote --> update_add_htlc(this_htlc)   --> local
125         /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
126         /// Now `this_htlc` will be assigned this state. It's unable to be officially
127         /// accepted, i.e. included in a commitment_signed, because we're missing the
128         /// RAA that provides our next per_commitment_point. The per_commitment_point
129         /// is used to derive commitment keys, which are used to construct the
130         /// signatures in a commitment_signed message.
131         /// Implies AwaitingRemoteRevoke.
132         ///
133         /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
134         AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
135         /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
136         /// We have also included this HTLC in our latest commitment_signed and are now just waiting
137         /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
138         /// channel (before it can then get forwarded and/or removed).
139         /// Implies AwaitingRemoteRevoke.
140         AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
141         Committed,
142         /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
143         /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
144         /// we'll drop it.
145         /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
146         /// commitment transaction without it as otherwise we'll have to force-close the channel to
147         /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
148         /// anyway). That said, ChannelMonitor does this for us (see
149         /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
150         /// our own local state before then, once we're sure that the next commitment_signed and
151         /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
152         LocalRemoved(InboundHTLCRemovalReason),
153 }
154
155 struct InboundHTLCOutput {
156         htlc_id: u64,
157         amount_msat: u64,
158         cltv_expiry: u32,
159         payment_hash: PaymentHash,
160         state: InboundHTLCState,
161 }
162
163 enum OutboundHTLCState {
164         /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
165         /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
166         /// we will promote to Committed (note that they may not accept it until the next time we
167         /// revoke, but we don't really care about that:
168         ///  * they've revoked, so worst case we can announce an old state and get our (option on)
169         ///    money back (though we won't), and,
170         ///  * we'll send them a revoke when they send a commitment_signed, and since only they're
171         ///    allowed to remove it, the "can only be removed once committed on both sides" requirement
172         ///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
173         ///    we'll never get out of sync).
174         /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
175         /// OutboundHTLCOutput's size just for a temporary bit
176         LocalAnnounced(Box<msgs::OnionPacket>),
177         Committed,
178         /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
179         /// the change (though they'll need to revoke before we fail the payment).
180         RemoteRemoved(OutboundHTLCOutcome),
181         /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
182         /// the remote side hasn't yet revoked their previous state, which we need them to do before we
183         /// can do any backwards failing. Implies AwaitingRemoteRevoke.
184         /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
185         /// remote revoke_and_ack on a previous state before we can do so.
186         AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
187         /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
188         /// the remote side hasn't yet revoked their previous state, which we need them to do before we
189         /// can do any backwards failing. Implies AwaitingRemoteRevoke.
190         /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
191         /// revoke_and_ack to drop completely.
192         AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
193 }
194
195 #[derive(Clone)]
196 enum OutboundHTLCOutcome {
197         /// LDK version 0.0.105+ will always fill in the preimage here.
198         Success(Option<PaymentPreimage>),
199         Failure(HTLCFailReason),
200 }
201
202 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
203         fn from(o: Option<HTLCFailReason>) -> Self {
204                 match o {
205                         None => OutboundHTLCOutcome::Success(None),
206                         Some(r) => OutboundHTLCOutcome::Failure(r)
207                 }
208         }
209 }
210
211 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
212         fn into(self) -> Option<&'a HTLCFailReason> {
213                 match self {
214                         OutboundHTLCOutcome::Success(_) => None,
215                         OutboundHTLCOutcome::Failure(ref r) => Some(r)
216                 }
217         }
218 }
219
220 struct OutboundHTLCOutput {
221         htlc_id: u64,
222         amount_msat: u64,
223         cltv_expiry: u32,
224         payment_hash: PaymentHash,
225         state: OutboundHTLCState,
226         source: HTLCSource,
227 }
228
229 /// See AwaitingRemoteRevoke ChannelState for more info
230 enum HTLCUpdateAwaitingACK {
231         AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
232                 // always outbound
233                 amount_msat: u64,
234                 cltv_expiry: u32,
235                 payment_hash: PaymentHash,
236                 source: HTLCSource,
237                 onion_routing_packet: msgs::OnionPacket,
238         },
239         ClaimHTLC {
240                 payment_preimage: PaymentPreimage,
241                 htlc_id: u64,
242         },
243         FailHTLC {
244                 htlc_id: u64,
245                 err_packet: msgs::OnionErrorPacket,
246         },
247 }
248
249 /// There are a few "states" and then a number of flags which can be applied:
250 /// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
251 /// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
252 /// move on to ChannelReady.
253 /// Note that PeerDisconnected can be set on both ChannelReady and FundingSent.
254 /// ChannelReady can then get all remaining flags set on it, until we finish shutdown, then we
255 /// move on to ShutdownComplete, at which point most calls into this channel are disallowed.
256 enum ChannelState {
257         /// Implies we have (or are prepared to) send our open_channel/accept_channel message
258         OurInitSent = 1 << 0,
259         /// Implies we have received their open_channel/accept_channel message
260         TheirInitSent = 1 << 1,
261         /// We have sent funding_created and are awaiting a funding_signed to advance to FundingSent.
262         /// Note that this is nonsense for an inbound channel as we immediately generate funding_signed
263         /// upon receipt of funding_created, so simply skip this state.
264         FundingCreated = 4,
265         /// Set when we have received/sent funding_created and funding_signed and are thus now waiting
266         /// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
267         /// and our counterparty consider the funding transaction confirmed.
268         FundingSent = 8,
269         /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
270         /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
271         TheirChannelReady = 1 << 4,
272         /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
273         /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
274         OurChannelReady = 1 << 5,
275         ChannelReady = 64,
276         /// Flag which is set on ChannelReady and FundingSent indicating remote side is considered
277         /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
278         /// dance.
279         PeerDisconnected = 1 << 7,
280         /// Flag which is set on ChannelReady, FundingCreated, and FundingSent indicating the user has
281         /// told us a ChannelMonitor update is pending async persistence somewhere and we should pause
282         /// sending any outbound messages until they've managed to finish.
283         MonitorUpdateInProgress = 1 << 8,
284         /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
285         /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
286         /// messages as then we will be unable to determine which HTLCs they included in their
287         /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
288         /// later.
289         /// Flag is set on ChannelReady.
290         AwaitingRemoteRevoke = 1 << 9,
291         /// Flag which is set on ChannelReady or FundingSent after receiving a shutdown message from
292         /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
293         /// to respond with our own shutdown message when possible.
294         RemoteShutdownSent = 1 << 10,
295         /// Flag which is set on ChannelReady or FundingSent after sending a shutdown message. At this
296         /// point, we may not add any new HTLCs to the channel.
297         LocalShutdownSent = 1 << 11,
298         /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
299         /// to drop us, but we store this anyway.
300         ShutdownComplete = 4096,
301 }
302 const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
303 const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
304
305 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
306
307 pub const DEFAULT_MAX_HTLCS: u16 = 50;
308
309 pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
310         const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
311         const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
312         if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
313 }
314
315 #[cfg(not(test))]
316 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
317 #[cfg(test)]
318 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
319
320 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
321
322 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
323 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
324 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
325 /// `holder_max_htlc_value_in_flight_msat`.
326 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
327
328 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
329 /// `option_support_large_channel` (aka wumbo channels) is not supported.
330 /// It's 2^24 - 1.
331 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
332
333 /// Total bitcoin supply in satoshis.
334 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
335
336 /// The maximum network dust limit for standard script formats. This currently represents the
337 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
338 /// transaction non-standard and thus refuses to relay it.
339 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
340 /// implementations use this value for their dust limit today.
341 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
342
343 /// The maximum channel dust limit we will accept from our counterparty.
344 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
345
346 /// The dust limit is used for both the commitment transaction outputs as well as the closing
347 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
348 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
349 /// In order to avoid having to concern ourselves with standardness during the closing process, we
350 /// simply require our counterparty to use a dust limit which will leave any segwit output
351 /// standard.
352 /// See <https://github.com/lightning/bolts/issues/905> for more details.
353 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
354
355 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
356 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
357
358 /// Used to return a simple Error back to ChannelManager. Will get converted to a
359 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
360 /// channel_id in ChannelManager.
361 pub(super) enum ChannelError {
362         Ignore(String),
363         Warn(String),
364         Close(String),
365 }
366
367 impl fmt::Debug for ChannelError {
368         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
369                 match self {
370                         &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
371                         &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
372                         &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
373                 }
374         }
375 }
376
377 macro_rules! secp_check {
378         ($res: expr, $err: expr) => {
379                 match $res {
380                         Ok(thing) => thing,
381                         Err(_) => return Err(ChannelError::Close($err)),
382                 }
383         };
384 }
385
386 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
387 /// our counterparty or not. However, we don't want to announce updates right away to avoid
388 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
389 /// our channel_update message and track the current state here.
390 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
391 #[derive(Clone, Copy, PartialEq)]
392 pub(super) enum ChannelUpdateStatus {
393         /// We've announced the channel as enabled and are connected to our peer.
394         Enabled,
395         /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
396         DisabledStaged(u8),
397         /// Our channel is live again, but we haven't announced the channel as enabled yet.
398         EnabledStaged(u8),
399         /// We've announced the channel as disabled.
400         Disabled,
401 }
402
403 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
404 #[derive(PartialEq)]
405 pub enum AnnouncementSigsState {
406         /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
407         /// we sent the last `AnnouncementSignatures`.
408         NotSent,
409         /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
410         /// This state never appears on disk - instead we write `NotSent`.
411         MessageSent,
412         /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
413         /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
414         /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
415         /// they send back a `RevokeAndACK`.
416         /// This state never appears on disk - instead we write `NotSent`.
417         Committed,
418         /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
419         /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
420         PeerReceived,
421 }
422
423 /// An enum indicating whether the local or remote side offered a given HTLC.
424 enum HTLCInitiator {
425         LocalOffered,
426         RemoteOffered,
427 }
428
429 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
430 struct HTLCStats {
431         pending_htlcs: u32,
432         pending_htlcs_value_msat: u64,
433         on_counterparty_tx_dust_exposure_msat: u64,
434         on_holder_tx_dust_exposure_msat: u64,
435         holding_cell_msat: u64,
436         on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
437 }
438
439 /// An enum gathering stats on commitment transaction, either local or remote.
440 struct CommitmentStats<'a> {
441         tx: CommitmentTransaction, // the transaction info
442         feerate_per_kw: u32, // the feerate included to build the transaction
443         total_fee_sat: u64, // the total fee included in the transaction
444         num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
445         htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
446         local_balance_msat: u64, // local balance before fees but considering dust limits
447         remote_balance_msat: u64, // remote balance before fees but considering dust limits
448         preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
449 }
450
451 /// Used when calculating whether we or the remote can afford an additional HTLC.
452 struct HTLCCandidate {
453         amount_msat: u64,
454         origin: HTLCInitiator,
455 }
456
457 impl HTLCCandidate {
458         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
459                 Self {
460                         amount_msat,
461                         origin,
462                 }
463         }
464 }
465
466 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
467 /// description
468 enum UpdateFulfillFetch {
469         NewClaim {
470                 monitor_update: ChannelMonitorUpdate,
471                 htlc_value_msat: u64,
472                 msg: Option<msgs::UpdateFulfillHTLC>,
473         },
474         DuplicateClaim {},
475 }
476
477 /// The return type of get_update_fulfill_htlc_and_commit.
478 pub enum UpdateFulfillCommitFetch<'a> {
479         /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
480         /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
481         /// previously placed in the holding cell (and has since been removed).
482         NewClaim {
483                 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
484                 monitor_update: &'a ChannelMonitorUpdate,
485                 /// The value of the HTLC which was claimed, in msat.
486                 htlc_value_msat: u64,
487         },
488         /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
489         /// or has been forgotten (presumably previously claimed).
490         DuplicateClaim {},
491 }
492
493 /// The return value of `monitor_updating_restored`
494 pub(super) struct MonitorRestoreUpdates {
495         pub raa: Option<msgs::RevokeAndACK>,
496         pub commitment_update: Option<msgs::CommitmentUpdate>,
497         pub order: RAACommitmentOrder,
498         pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
499         pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
500         pub finalized_claimed_htlcs: Vec<HTLCSource>,
501         pub funding_broadcastable: Option<Transaction>,
502         pub channel_ready: Option<msgs::ChannelReady>,
503         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
504 }
505
506 /// The return value of `channel_reestablish`
507 pub(super) struct ReestablishResponses {
508         pub channel_ready: Option<msgs::ChannelReady>,
509         pub raa: Option<msgs::RevokeAndACK>,
510         pub commitment_update: Option<msgs::CommitmentUpdate>,
511         pub order: RAACommitmentOrder,
512         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
513         pub shutdown_msg: Option<msgs::Shutdown>,
514 }
515
516 /// The return type of `force_shutdown`
517 pub(crate) type ShutdownResult = (
518         Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
519         Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
520 );
521
522 /// If the majority of the channels funds are to the fundee and the initiator holds only just
523 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
524 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
525 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
526 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
527 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
528 /// by this multiple without hitting this case, before sending.
529 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
530 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
531 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
532 /// leave the channel less usable as we hold a bigger reserve.
533 #[cfg(any(fuzzing, test))]
534 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
535 #[cfg(not(any(fuzzing, test)))]
536 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
537
538 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
539 /// channel creation on an inbound channel, we simply force-close and move on.
540 /// This constant is the one suggested in BOLT 2.
541 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
542
543 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
544 /// not have enough balance value remaining to cover the onchain cost of this new
545 /// HTLC weight. If this happens, our counterparty fails the reception of our
546 /// commitment_signed including this new HTLC due to infringement on the channel
547 /// reserve.
548 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
549 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
550 /// leads to a channel force-close. Ultimately, this is an issue coming from the
551 /// design of LN state machines, allowing asynchronous updates.
552 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
553
554 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
555 /// commitment transaction fees, with at least this many HTLCs present on the commitment
556 /// transaction (not counting the value of the HTLCs themselves).
557 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
558
559 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
560 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
561 /// ChannelUpdate prompted by the config update. This value was determined as follows:
562 ///
563 ///   * The expected interval between ticks (1 minute).
564 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
565 ///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
566 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
567 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
568
569 /// The number of ticks that may elapse while we're waiting for a response to a
570 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
571 /// them.
572 ///
573 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
574 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
575
576 struct PendingChannelMonitorUpdate {
577         update: ChannelMonitorUpdate,
578         /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
579         /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
580         /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
581         ///
582         /// [`ChannelManager`]: super::channelmanager::ChannelManager
583         blocked: bool,
584 }
585
586 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
587         (0, update, required),
588         (2, blocked, required),
589 });
590
591 /// Contains everything about the channel including state, and various flags.
592 pub(super) struct ChannelContext<Signer: ChannelSigner> {
593         config: LegacyChannelConfig,
594
595         // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
596         // constructed using it. The second element in the tuple corresponds to the number of ticks that
597         // have elapsed since the update occurred.
598         prev_config: Option<(ChannelConfig, usize)>,
599
600         inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
601
602         user_id: u128,
603
604         channel_id: [u8; 32],
605         temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
606         channel_state: u32,
607
608         // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
609         // our peer. However, we want to make sure they received it, or else rebroadcast it when we
610         // next connect.
611         // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
612         // Note that a number of our tests were written prior to the behavior here which retransmits
613         // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
614         // many tests.
615         #[cfg(any(test, feature = "_test_utils"))]
616         pub(crate) announcement_sigs_state: AnnouncementSigsState,
617         #[cfg(not(any(test, feature = "_test_utils")))]
618         announcement_sigs_state: AnnouncementSigsState,
619
620         secp_ctx: Secp256k1<secp256k1::All>,
621         channel_value_satoshis: u64,
622
623         latest_monitor_update_id: u64,
624
625         holder_signer: Signer,
626         shutdown_scriptpubkey: Option<ShutdownScript>,
627         destination_script: Script,
628
629         // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
630         // generation start at 0 and count up...this simplifies some parts of implementation at the
631         // cost of others, but should really just be changed.
632
633         cur_holder_commitment_transaction_number: u64,
634         cur_counterparty_commitment_transaction_number: u64,
635         value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
636         pending_inbound_htlcs: Vec<InboundHTLCOutput>,
637         pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
638         holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
639
640         /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
641         /// need to ensure we resend them in the order we originally generated them. Note that because
642         /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
643         /// sufficient to simply set this to the opposite of any message we are generating as we
644         /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
645         /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
646         /// send it first.
647         resend_order: RAACommitmentOrder,
648
649         monitor_pending_channel_ready: bool,
650         monitor_pending_revoke_and_ack: bool,
651         monitor_pending_commitment_signed: bool,
652
653         // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
654         // responsible for some of the HTLCs here or not - we don't know whether the update in question
655         // completed or not. We currently ignore these fields entirely when force-closing a channel,
656         // but need to handle this somehow or we run the risk of losing HTLCs!
657         monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
658         monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
659         monitor_pending_finalized_fulfills: Vec<HTLCSource>,
660
661         // pending_update_fee is filled when sending and receiving update_fee.
662         //
663         // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
664         // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
665         // generating new commitment transactions with exactly the same criteria as inbound/outbound
666         // HTLCs with similar state.
667         pending_update_fee: Option<(u32, FeeUpdateState)>,
668         // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
669         // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
670         // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
671         // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
672         // further `send_update_fee` calls, dropping the previous holding cell update entirely.
673         holding_cell_update_fee: Option<u32>,
674         next_holder_htlc_id: u64,
675         next_counterparty_htlc_id: u64,
676         feerate_per_kw: u32,
677
678         /// The timestamp set on our latest `channel_update` message for this channel. It is updated
679         /// when the channel is updated in ways which may impact the `channel_update` message or when a
680         /// new block is received, ensuring it's always at least moderately close to the current real
681         /// time.
682         update_time_counter: u32,
683
684         #[cfg(debug_assertions)]
685         /// Max to_local and to_remote outputs in a locally-generated commitment transaction
686         holder_max_commitment_tx_output: Mutex<(u64, u64)>,
687         #[cfg(debug_assertions)]
688         /// Max to_local and to_remote outputs in a remote-generated commitment transaction
689         counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
690
691         last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
692         target_closing_feerate_sats_per_kw: Option<u32>,
693
694         /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
695         /// update, we need to delay processing it until later. We do that here by simply storing the
696         /// closing_signed message and handling it in `maybe_propose_closing_signed`.
697         pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
698
699         /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
700         /// transaction. These are set once we reach `closing_negotiation_ready`.
701         #[cfg(test)]
702         pub(crate) closing_fee_limits: Option<(u64, u64)>,
703         #[cfg(not(test))]
704         closing_fee_limits: Option<(u64, u64)>,
705
706         /// Flag that ensures that `accept_inbound_channel` must be called before `funding_created`
707         /// is executed successfully. The reason for this flag is that when the
708         /// `UserConfig::manually_accept_inbound_channels` config flag is set to true, inbound channels
709         /// are required to be manually accepted by the node operator before the `msgs::AcceptChannel`
710         /// message is created and sent out. During the manual accept process, `accept_inbound_channel`
711         /// is called by `ChannelManager::accept_inbound_channel`.
712         ///
713         /// The flag counteracts that a counterparty node could theoretically send a
714         /// `msgs::FundingCreated` message before the node operator has manually accepted an inbound
715         /// channel request made by the counterparty node. That would execute `funding_created` before
716         /// `accept_inbound_channel`, and `funding_created` should therefore not execute successfully.
717         inbound_awaiting_accept: bool,
718
719         /// The hash of the block in which the funding transaction was included.
720         funding_tx_confirmed_in: Option<BlockHash>,
721         funding_tx_confirmation_height: u32,
722         short_channel_id: Option<u64>,
723         /// Either the height at which this channel was created or the height at which it was last
724         /// serialized if it was serialized by versions prior to 0.0.103.
725         /// We use this to close if funding is never broadcasted.
726         channel_creation_height: u32,
727
728         counterparty_dust_limit_satoshis: u64,
729
730         #[cfg(test)]
731         pub(super) holder_dust_limit_satoshis: u64,
732         #[cfg(not(test))]
733         holder_dust_limit_satoshis: u64,
734
735         #[cfg(test)]
736         pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
737         #[cfg(not(test))]
738         counterparty_max_htlc_value_in_flight_msat: u64,
739
740         #[cfg(test)]
741         pub(super) holder_max_htlc_value_in_flight_msat: u64,
742         #[cfg(not(test))]
743         holder_max_htlc_value_in_flight_msat: u64,
744
745         /// minimum channel reserve for self to maintain - set by them.
746         counterparty_selected_channel_reserve_satoshis: Option<u64>,
747
748         #[cfg(test)]
749         pub(super) holder_selected_channel_reserve_satoshis: u64,
750         #[cfg(not(test))]
751         holder_selected_channel_reserve_satoshis: u64,
752
753         counterparty_htlc_minimum_msat: u64,
754         holder_htlc_minimum_msat: u64,
755         #[cfg(test)]
756         pub counterparty_max_accepted_htlcs: u16,
757         #[cfg(not(test))]
758         counterparty_max_accepted_htlcs: u16,
759         holder_max_accepted_htlcs: u16,
760         minimum_depth: Option<u32>,
761
762         counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
763
764         pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
765         funding_transaction: Option<Transaction>,
766
767         counterparty_cur_commitment_point: Option<PublicKey>,
768         counterparty_prev_commitment_point: Option<PublicKey>,
769         counterparty_node_id: PublicKey,
770
771         counterparty_shutdown_scriptpubkey: Option<Script>,
772
773         commitment_secrets: CounterpartyCommitmentSecrets,
774
775         channel_update_status: ChannelUpdateStatus,
776         /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
777         /// not complete within a single timer tick (one minute), we should force-close the channel.
778         /// This prevents us from keeping unusable channels around forever if our counterparty wishes
779         /// to DoS us.
780         /// Note that this field is reset to false on deserialization to give us a chance to connect to
781         /// our peer and start the closing_signed negotiation fresh.
782         closing_signed_in_flight: bool,
783
784         /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
785         /// This can be used to rebroadcast the channel_announcement message later.
786         announcement_sigs: Option<(Signature, Signature)>,
787
788         // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
789         // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
790         // be, by comparing the cached values to the fee of the tranaction generated by
791         // `build_commitment_transaction`.
792         #[cfg(any(test, fuzzing))]
793         next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
794         #[cfg(any(test, fuzzing))]
795         next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
796
797         /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
798         /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
799         /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
800         /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
801         /// message until we receive a channel_reestablish.
802         ///
803         /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
804         pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
805
806         /// An option set when we wish to track how many ticks have elapsed while waiting for a response
807         /// from our counterparty after sending a message. If the peer has yet to respond after reaching
808         /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
809         /// unblock the state machine.
810         ///
811         /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
812         /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
813         /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
814         ///
815         /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
816         /// [`msgs::RevokeAndACK`] message from the counterparty.
817         sent_message_awaiting_response: Option<usize>,
818
819         #[cfg(any(test, fuzzing))]
820         // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
821         // corresponding HTLC on the inbound path. If, then, the outbound path channel is
822         // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
823         // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
824         // is fine, but as a sanity check in our failure to generate the second claim, we check here
825         // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
826         historical_inbound_htlc_fulfills: HashSet<u64>,
827
828         /// This channel's type, as negotiated during channel open
829         channel_type: ChannelTypeFeatures,
830
831         // Our counterparty can offer us SCID aliases which they will map to this channel when routing
832         // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
833         // the channel's funding UTXO.
834         //
835         // We also use this when sending our peer a channel_update that isn't to be broadcasted
836         // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
837         // associated channel mapping.
838         //
839         // We only bother storing the most recent SCID alias at any time, though our counterparty has
840         // to store all of them.
841         latest_inbound_scid_alias: Option<u64>,
842
843         // We always offer our counterparty a static SCID alias, which we recognize as for this channel
844         // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
845         // don't currently support node id aliases and eventually privacy should be provided with
846         // blinded paths instead of simple scid+node_id aliases.
847         outbound_scid_alias: u64,
848
849         // We track whether we already emitted a `ChannelPending` event.
850         channel_pending_event_emitted: bool,
851
852         // We track whether we already emitted a `ChannelReady` event.
853         channel_ready_event_emitted: bool,
854
855         /// The unique identifier used to re-derive the private key material for the channel through
856         /// [`SignerProvider::derive_channel_signer`].
857         channel_keys_id: [u8; 32],
858
859         /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
860         /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
861         /// completes we still need to be able to complete the persistence. Thus, we have to keep a
862         /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
863         pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
864 }
865
866 impl<Signer: ChannelSigner> ChannelContext<Signer> {
867         pub(crate) fn opt_anchors(&self) -> bool {
868                 self.channel_transaction_parameters.opt_anchors.is_some()
869         }
870
871         /// Allowed in any state (including after shutdown)
872         pub fn get_update_time_counter(&self) -> u32 {
873                 self.update_time_counter
874         }
875
876         pub fn get_latest_monitor_update_id(&self) -> u64 {
877                 self.latest_monitor_update_id
878         }
879
880         pub fn should_announce(&self) -> bool {
881                 self.config.announced_channel
882         }
883
884         pub fn is_outbound(&self) -> bool {
885                 self.channel_transaction_parameters.is_outbound_from_holder
886         }
887
888         /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
889         /// Allowed in any state (including after shutdown)
890         pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
891                 self.config.options.forwarding_fee_base_msat
892         }
893
894         /// Returns true if we've ever received a message from the remote end for this Channel
895         pub fn have_received_message(&self) -> bool {
896                 self.channel_state > (ChannelState::OurInitSent as u32)
897         }
898
899         /// Returns true if this channel is fully established and not known to be closing.
900         /// Allowed in any state (including after shutdown)
901         pub fn is_usable(&self) -> bool {
902                 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
903                 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
904         }
905
906         /// Returns true if this channel is currently available for use. This is a superset of
907         /// is_usable() and considers things like the channel being temporarily disabled.
908         /// Allowed in any state (including after shutdown)
909         pub fn is_live(&self) -> bool {
910                 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
911         }
912
913         // Public utilities:
914
915         pub fn channel_id(&self) -> [u8; 32] {
916                 self.channel_id
917         }
918
919         // Return the `temporary_channel_id` used during channel establishment.
920         //
921         // Will return `None` for channels created prior to LDK version 0.0.115.
922         pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
923                 self.temporary_channel_id
924         }
925
926         pub fn minimum_depth(&self) -> Option<u32> {
927                 self.minimum_depth
928         }
929
930         /// Gets the "user_id" value passed into the construction of this channel. It has no special
931         /// meaning and exists only to allow users to have a persistent identifier of a channel.
932         pub fn get_user_id(&self) -> u128 {
933                 self.user_id
934         }
935
936         /// Gets the channel's type
937         pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
938                 &self.channel_type
939         }
940
941         /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
942         /// is_usable() returns true).
943         /// Allowed in any state (including after shutdown)
944         pub fn get_short_channel_id(&self) -> Option<u64> {
945                 self.short_channel_id
946         }
947
948         /// Allowed in any state (including after shutdown)
949         pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
950                 self.latest_inbound_scid_alias
951         }
952
953         /// Allowed in any state (including after shutdown)
954         pub fn outbound_scid_alias(&self) -> u64 {
955                 self.outbound_scid_alias
956         }
957
958         /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
959         /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
960         pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
961                 assert_eq!(self.outbound_scid_alias, 0);
962                 self.outbound_scid_alias = outbound_scid_alias;
963         }
964
965         /// Returns the funding_txo we either got from our peer, or were given by
966         /// get_outbound_funding_created.
967         pub fn get_funding_txo(&self) -> Option<OutPoint> {
968                 self.channel_transaction_parameters.funding_outpoint
969         }
970
971         /// Returns the block hash in which our funding transaction was confirmed.
972         pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
973                 self.funding_tx_confirmed_in
974         }
975
976         /// Returns the current number of confirmations on the funding transaction.
977         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
978                 if self.funding_tx_confirmation_height == 0 {
979                         // We either haven't seen any confirmation yet, or observed a reorg.
980                         return 0;
981                 }
982
983                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
984         }
985
986         fn get_holder_selected_contest_delay(&self) -> u16 {
987                 self.channel_transaction_parameters.holder_selected_contest_delay
988         }
989
990         fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
991                 &self.channel_transaction_parameters.holder_pubkeys
992         }
993
994         pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
995                 self.channel_transaction_parameters.counterparty_parameters
996                         .as_ref().map(|params| params.selected_contest_delay)
997         }
998
999         fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1000                 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1001         }
1002
1003         /// Allowed in any state (including after shutdown)
1004         pub fn get_counterparty_node_id(&self) -> PublicKey {
1005                 self.counterparty_node_id
1006         }
1007
1008         /// Allowed in any state (including after shutdown)
1009         pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1010                 self.holder_htlc_minimum_msat
1011         }
1012
1013         /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1014         pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1015                 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1016         }
1017
1018         /// Allowed in any state (including after shutdown)
1019         pub fn get_announced_htlc_max_msat(&self) -> u64 {
1020                 return cmp::min(
1021                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1022                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1023                         // channel might have been used to route very small values (either by honest users or as DoS).
1024                         self.channel_value_satoshis * 1000 * 9 / 10,
1025
1026                         self.counterparty_max_htlc_value_in_flight_msat
1027                 );
1028         }
1029
1030         /// Allowed in any state (including after shutdown)
1031         pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1032                 self.counterparty_htlc_minimum_msat
1033         }
1034
1035         /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1036         pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1037                 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1038         }
1039
1040         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1041                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1042                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1043                         cmp::min(
1044                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1045                                 party_max_htlc_value_in_flight_msat
1046                         )
1047                 })
1048         }
1049
1050         pub fn get_value_satoshis(&self) -> u64 {
1051                 self.channel_value_satoshis
1052         }
1053
1054         pub fn get_fee_proportional_millionths(&self) -> u32 {
1055                 self.config.options.forwarding_fee_proportional_millionths
1056         }
1057
1058         pub fn get_cltv_expiry_delta(&self) -> u16 {
1059                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1060         }
1061
1062         pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
1063                 self.config.options.max_dust_htlc_exposure_msat
1064         }
1065
1066         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1067         pub fn prev_config(&self) -> Option<ChannelConfig> {
1068                 self.prev_config.map(|prev_config| prev_config.0)
1069         }
1070
1071         // Checks whether we should emit a `ChannelPending` event.
1072         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1073                 self.is_funding_initiated() && !self.channel_pending_event_emitted
1074         }
1075
1076         // Returns whether we already emitted a `ChannelPending` event.
1077         pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1078                 self.channel_pending_event_emitted
1079         }
1080
1081         // Remembers that we already emitted a `ChannelPending` event.
1082         pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1083                 self.channel_pending_event_emitted = true;
1084         }
1085
1086         // Checks whether we should emit a `ChannelReady` event.
1087         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1088                 self.is_usable() && !self.channel_ready_event_emitted
1089         }
1090
1091         // Remembers that we already emitted a `ChannelReady` event.
1092         pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1093                 self.channel_ready_event_emitted = true;
1094         }
1095
1096         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1097         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1098         /// no longer be considered when forwarding HTLCs.
1099         pub fn maybe_expire_prev_config(&mut self) {
1100                 if self.prev_config.is_none() {
1101                         return;
1102                 }
1103                 let prev_config = self.prev_config.as_mut().unwrap();
1104                 prev_config.1 += 1;
1105                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1106                         self.prev_config = None;
1107                 }
1108         }
1109
1110         /// Returns the current [`ChannelConfig`] applied to the channel.
1111         pub fn config(&self) -> ChannelConfig {
1112                 self.config.options
1113         }
1114
1115         /// Updates the channel's config. A bool is returned indicating whether the config update
1116         /// applied resulted in a new ChannelUpdate message.
1117         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1118                 let did_channel_update =
1119                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1120                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1121                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1122                 if did_channel_update {
1123                         self.prev_config = Some((self.config.options, 0));
1124                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1125                         // policy change to propagate throughout the network.
1126                         self.update_time_counter += 1;
1127                 }
1128                 self.config.options = *config;
1129                 did_channel_update
1130         }
1131
1132         /// Returns true if funding_created was sent/received.
1133         pub fn is_funding_initiated(&self) -> bool {
1134                 self.channel_state >= ChannelState::FundingSent as u32
1135         }
1136
1137         /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1138         /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1139         /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1140         /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1141         /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1142         /// an HTLC to a).
1143         /// @local is used only to convert relevant internal structures which refer to remote vs local
1144         /// to decide value of outputs and direction of HTLCs.
1145         /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1146         /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1147         /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1148         /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1149         /// which peer generated this transaction and "to whom" this transaction flows.
1150         #[inline]
1151         fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1152                 where L::Target: Logger
1153         {
1154                 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1155                 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1156                 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1157
1158                 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1159                 let mut remote_htlc_total_msat = 0;
1160                 let mut local_htlc_total_msat = 0;
1161                 let mut value_to_self_msat_offset = 0;
1162
1163                 let mut feerate_per_kw = self.feerate_per_kw;
1164                 if let Some((feerate, update_state)) = self.pending_update_fee {
1165                         if match update_state {
1166                                 // Note that these match the inclusion criteria when scanning
1167                                 // pending_inbound_htlcs below.
1168                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1169                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1170                                 FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
1171                         } {
1172                                 feerate_per_kw = feerate;
1173                         }
1174                 }
1175
1176                 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1177                         commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1178                         get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1179                         log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1180
1181                 macro_rules! get_htlc_in_commitment {
1182                         ($htlc: expr, $offered: expr) => {
1183                                 HTLCOutputInCommitment {
1184                                         offered: $offered,
1185                                         amount_msat: $htlc.amount_msat,
1186                                         cltv_expiry: $htlc.cltv_expiry,
1187                                         payment_hash: $htlc.payment_hash,
1188                                         transaction_output_index: None
1189                                 }
1190                         }
1191                 }
1192
1193                 macro_rules! add_htlc_output {
1194                         ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1195                                 if $outbound == local { // "offered HTLC output"
1196                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1197                                         let htlc_tx_fee = if self.opt_anchors() {
1198                                                 0
1199                                         } else {
1200                                                 feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000
1201                                         };
1202                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1203                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1204                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
1205                                         } else {
1206                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1207                                                 included_dust_htlcs.push((htlc_in_tx, $source));
1208                                         }
1209                                 } else {
1210                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1211                                         let htlc_tx_fee = if self.opt_anchors() {
1212                                                 0
1213                                         } else {
1214                                                 feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000
1215                                         };
1216                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1217                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1218                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
1219                                         } else {
1220                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1221                                                 included_dust_htlcs.push((htlc_in_tx, $source));
1222                                         }
1223                                 }
1224                         }
1225                 }
1226
1227                 for ref htlc in self.pending_inbound_htlcs.iter() {
1228                         let (include, state_name) = match htlc.state {
1229                                 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1230                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1231                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1232                                 InboundHTLCState::Committed => (true, "Committed"),
1233                                 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1234                         };
1235
1236                         if include {
1237                                 add_htlc_output!(htlc, false, None, state_name);
1238                                 remote_htlc_total_msat += htlc.amount_msat;
1239                         } else {
1240                                 log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
1241                                 match &htlc.state {
1242                                         &InboundHTLCState::LocalRemoved(ref reason) => {
1243                                                 if generated_by_local {
1244                                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1245                                                                 value_to_self_msat_offset += htlc.amount_msat as i64;
1246                                                         }
1247                                                 }
1248                                         },
1249                                         _ => {},
1250                                 }
1251                         }
1252                 }
1253
1254                 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1255
1256                 for ref htlc in self.pending_outbound_htlcs.iter() {
1257                         let (include, state_name) = match htlc.state {
1258                                 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1259                                 OutboundHTLCState::Committed => (true, "Committed"),
1260                                 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1261                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1262                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1263                         };
1264
1265                         let preimage_opt = match htlc.state {
1266                                 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1267                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1268                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1269                                 _ => None,
1270                         };
1271
1272                         if let Some(preimage) = preimage_opt {
1273                                 preimages.push(preimage);
1274                         }
1275
1276                         if include {
1277                                 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1278                                 local_htlc_total_msat += htlc.amount_msat;
1279                         } else {
1280                                 log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
1281                                 match htlc.state {
1282                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1283                                                 value_to_self_msat_offset -= htlc.amount_msat as i64;
1284                                         },
1285                                         OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1286                                                 if !generated_by_local {
1287                                                         value_to_self_msat_offset -= htlc.amount_msat as i64;
1288                                                 }
1289                                         },
1290                                         _ => {},
1291                                 }
1292                         }
1293                 }
1294
1295                 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1296                 assert!(value_to_self_msat >= 0);
1297                 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1298                 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1299                 // "violate" their reserve value by couting those against it. Thus, we have to convert
1300                 // everything to i64 before subtracting as otherwise we can overflow.
1301                 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1302                 assert!(value_to_remote_msat >= 0);
1303
1304                 #[cfg(debug_assertions)]
1305                 {
1306                         // Make sure that the to_self/to_remote is always either past the appropriate
1307                         // channel_reserve *or* it is making progress towards it.
1308                         let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1309                                 self.holder_max_commitment_tx_output.lock().unwrap()
1310                         } else {
1311                                 self.counterparty_max_commitment_tx_output.lock().unwrap()
1312                         };
1313                         debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1314                         broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1315                         debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1316                         broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1317                 }
1318
1319                 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some());
1320                 let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1321                 let (value_to_self, value_to_remote) = if self.is_outbound() {
1322                         (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1323                 } else {
1324                         (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1325                 };
1326
1327                 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1328                 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1329                 let (funding_pubkey_a, funding_pubkey_b) = if local {
1330                         (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1331                 } else {
1332                         (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1333                 };
1334
1335                 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1336                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1337                 } else {
1338                         value_to_a = 0;
1339                 }
1340
1341                 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1342                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1343                 } else {
1344                         value_to_b = 0;
1345                 }
1346
1347                 let num_nondust_htlcs = included_non_dust_htlcs.len();
1348
1349                 let channel_parameters =
1350                         if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1351                         else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1352                 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1353                                                                              value_to_a as u64,
1354                                                                              value_to_b as u64,
1355                                                                              self.channel_transaction_parameters.opt_anchors.is_some(),
1356                                                                              funding_pubkey_a,
1357                                                                              funding_pubkey_b,
1358                                                                              keys.clone(),
1359                                                                              feerate_per_kw,
1360                                                                              &mut included_non_dust_htlcs,
1361                                                                              &channel_parameters
1362                 );
1363                 let mut htlcs_included = included_non_dust_htlcs;
1364                 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1365                 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1366                 htlcs_included.append(&mut included_dust_htlcs);
1367
1368                 // For the stats, trimmed-to-0 the value in msats accordingly
1369                 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1370                 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1371
1372                 CommitmentStats {
1373                         tx,
1374                         feerate_per_kw,
1375                         total_fee_sat,
1376                         num_nondust_htlcs,
1377                         htlcs_included,
1378                         local_balance_msat: value_to_self_msat as u64,
1379                         remote_balance_msat: value_to_remote_msat as u64,
1380                         preimages
1381                 }
1382         }
1383
1384         #[inline]
1385         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1386         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1387         /// our counterparty!)
1388         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1389         /// TODO Some magic rust shit to compile-time check this?
1390         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1391                 let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx);
1392                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1393                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1394                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1395
1396                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1397         }
1398
1399         #[inline]
1400         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1401         /// will sign and send to our counterparty.
1402         /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
1403         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1404                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1405                 //may see payments to it!
1406                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1407                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1408                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1409
1410                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1411         }
1412
1413         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1414         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1415         /// Panics if called before accept_channel/new_from_req
1416         pub fn get_funding_redeemscript(&self) -> Script {
1417                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1418         }
1419
1420         fn counterparty_funding_pubkey(&self) -> &PublicKey {
1421                 &self.get_counterparty_pubkeys().funding_pubkey
1422         }
1423
1424         pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1425                 self.feerate_per_kw
1426         }
1427
1428         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1429                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1430                 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1431                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1432                 // more dust balance if the feerate increases when we have several HTLCs pending
1433                 // which are near the dust limit.
1434                 let mut feerate_per_kw = self.feerate_per_kw;
1435                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1436                 // potential feerate updates coming soon.
1437                 if let Some((feerate, _)) = self.pending_update_fee {
1438                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1439                 }
1440                 if let Some(feerate) = outbound_feerate_update {
1441                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1442                 }
1443                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1444         }
1445
1446         /// Get forwarding information for the counterparty.
1447         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1448                 self.counterparty_forwarding_info.clone()
1449         }
1450
1451         /// Returns a HTLCStats about inbound pending htlcs
1452         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1453                 let context = self;
1454                 let mut stats = HTLCStats {
1455                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1456                         pending_htlcs_value_msat: 0,
1457                         on_counterparty_tx_dust_exposure_msat: 0,
1458                         on_holder_tx_dust_exposure_msat: 0,
1459                         holding_cell_msat: 0,
1460                         on_holder_tx_holding_cell_htlcs_count: 0,
1461                 };
1462
1463                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
1464                         (0, 0)
1465                 } else {
1466                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1467                         (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
1468                                 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
1469                 };
1470                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1471                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1472                 for ref htlc in context.pending_inbound_htlcs.iter() {
1473                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1474                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1475                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1476                         }
1477                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1478                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1479                         }
1480                 }
1481                 stats
1482         }
1483
1484         /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1485         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1486                 let context = self;
1487                 let mut stats = HTLCStats {
1488                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1489                         pending_htlcs_value_msat: 0,
1490                         on_counterparty_tx_dust_exposure_msat: 0,
1491                         on_holder_tx_dust_exposure_msat: 0,
1492                         holding_cell_msat: 0,
1493                         on_holder_tx_holding_cell_htlcs_count: 0,
1494                 };
1495
1496                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
1497                         (0, 0)
1498                 } else {
1499                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1500                         (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
1501                                 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
1502                 };
1503                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1504                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1505                 for ref htlc in context.pending_outbound_htlcs.iter() {
1506                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1507                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1508                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1509                         }
1510                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1511                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1512                         }
1513                 }
1514
1515                 for update in context.holding_cell_htlc_updates.iter() {
1516                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1517                                 stats.pending_htlcs += 1;
1518                                 stats.pending_htlcs_value_msat += amount_msat;
1519                                 stats.holding_cell_msat += amount_msat;
1520                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1521                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1522                                 }
1523                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1524                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1525                                 } else {
1526                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1527                                 }
1528                         }
1529                 }
1530                 stats
1531         }
1532
1533         /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1534         /// Doesn't bother handling the
1535         /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1536         /// corner case properly.
1537         pub fn get_available_balances(&self) -> AvailableBalances {
1538                 let context = &self;
1539                 // Note that we have to handle overflow due to the above case.
1540                 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1541                 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1542
1543                 let mut balance_msat = context.value_to_self_msat;
1544                 for ref htlc in context.pending_inbound_htlcs.iter() {
1545                         if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1546                                 balance_msat += htlc.amount_msat;
1547                         }
1548                 }
1549                 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1550
1551                 let outbound_capacity_msat = context.value_to_self_msat
1552                                 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1553                                 .saturating_sub(
1554                                         context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1555
1556                 let mut available_capacity_msat = outbound_capacity_msat;
1557
1558                 if context.is_outbound() {
1559                         // We should mind channel commit tx fee when computing how much of the available capacity
1560                         // can be used in the next htlc. Mirrors the logic in send_htlc.
1561                         //
1562                         // The fee depends on whether the amount we will be sending is above dust or not,
1563                         // and the answer will in turn change the amount itself â€” making it a circular
1564                         // dependency.
1565                         // This complicates the computation around dust-values, up to the one-htlc-value.
1566                         let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1567                         if !context.opt_anchors() {
1568                                 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
1569                         }
1570
1571                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1572                         let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1573                         let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1574                         let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1575
1576                         // We will first subtract the fee as if we were above-dust. Then, if the resulting
1577                         // value ends up being below dust, we have this fee available again. In that case,
1578                         // match the value to right-below-dust.
1579                         let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
1580                         if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1581                                 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1582                                 debug_assert!(one_htlc_difference_msat != 0);
1583                                 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1584                                 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1585                                 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1586                         } else {
1587                                 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1588                         }
1589                 } else {
1590                         // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1591                         // sending a new HTLC won't reduce their balance below our reserve threshold.
1592                         let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1593                         if !context.opt_anchors() {
1594                                 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
1595                         }
1596
1597                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1598                         let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1599
1600                         let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1601                         let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1602                                 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1603
1604                         if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
1605                                 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1606                                 // we've selected for them, we can only send dust HTLCs.
1607                                 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1608                         }
1609                 }
1610
1611                 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1612
1613                 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1614                 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1615                 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1616                 // send above the dust limit (as the router can always overpay to meet the dust limit).
1617                 let mut remaining_msat_below_dust_exposure_limit = None;
1618                 let mut dust_exposure_dust_limit_msat = 0;
1619
1620                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
1621                         (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1622                 } else {
1623                         let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1624                         (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
1625                          context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
1626                 };
1627                 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1628                 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
1629                         remaining_msat_below_dust_exposure_limit =
1630                                 Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1631                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1632                 }
1633
1634                 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1635                 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
1636                         remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1637                                 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1638                                 context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
1639                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1640                 }
1641
1642                 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1643                         if available_capacity_msat < dust_exposure_dust_limit_msat {
1644                                 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1645                         } else {
1646                                 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1647                         }
1648                 }
1649
1650                 available_capacity_msat = cmp::min(available_capacity_msat,
1651                         context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1652
1653                 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1654                         available_capacity_msat = 0;
1655                 }
1656
1657                 AvailableBalances {
1658                         inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1659                                         - context.value_to_self_msat as i64
1660                                         - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1661                                         - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1662                                 0) as u64,
1663                         outbound_capacity_msat,
1664                         next_outbound_htlc_limit_msat: available_capacity_msat,
1665                         next_outbound_htlc_minimum_msat,
1666                         balance_msat,
1667                 }
1668         }
1669
1670         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1671                 let context = &self;
1672                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1673         }
1674
1675         /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1676         /// number of pending HTLCs that are on track to be in our next commitment tx.
1677         ///
1678         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1679         /// `fee_spike_buffer_htlc` is `Some`.
1680         ///
1681         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1682         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1683         ///
1684         /// Dust HTLCs are excluded.
1685         fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1686                 let context = &self;
1687                 assert!(context.is_outbound());
1688
1689                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
1690                         (0, 0)
1691                 } else {
1692                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
1693                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
1694                 };
1695                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1696                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1697
1698                 let mut addl_htlcs = 0;
1699                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1700                 match htlc.origin {
1701                         HTLCInitiator::LocalOffered => {
1702                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1703                                         addl_htlcs += 1;
1704                                 }
1705                         },
1706                         HTLCInitiator::RemoteOffered => {
1707                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1708                                         addl_htlcs += 1;
1709                                 }
1710                         }
1711                 }
1712
1713                 let mut included_htlcs = 0;
1714                 for ref htlc in context.pending_inbound_htlcs.iter() {
1715                         if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1716                                 continue
1717                         }
1718                         // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1719                         // transaction including this HTLC if it times out before they RAA.
1720                         included_htlcs += 1;
1721                 }
1722
1723                 for ref htlc in context.pending_outbound_htlcs.iter() {
1724                         if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1725                                 continue
1726                         }
1727                         match htlc.state {
1728                                 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1729                                 OutboundHTLCState::Committed => included_htlcs += 1,
1730                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1731                                 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1732                                 // transaction won't be generated until they send us their next RAA, which will mean
1733                                 // dropping any HTLCs in this state.
1734                                 _ => {},
1735                         }
1736                 }
1737
1738                 for htlc in context.holding_cell_htlc_updates.iter() {
1739                         match htlc {
1740                                 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1741                                         if amount_msat / 1000 < real_dust_limit_timeout_sat {
1742                                                 continue
1743                                         }
1744                                         included_htlcs += 1
1745                                 },
1746                                 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1747                                          // ack we're guaranteed to never include them in commitment txs anymore.
1748                         }
1749                 }
1750
1751                 let num_htlcs = included_htlcs + addl_htlcs;
1752                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
1753                 #[cfg(any(test, fuzzing))]
1754                 {
1755                         let mut fee = res;
1756                         if fee_spike_buffer_htlc.is_some() {
1757                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
1758                         }
1759                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1760                                 + context.holding_cell_htlc_updates.len();
1761                         let commitment_tx_info = CommitmentTxInfoCached {
1762                                 fee,
1763                                 total_pending_htlcs,
1764                                 next_holder_htlc_id: match htlc.origin {
1765                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1766                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1767                                 },
1768                                 next_counterparty_htlc_id: match htlc.origin {
1769                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1770                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1771                                 },
1772                                 feerate: context.feerate_per_kw,
1773                         };
1774                         *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1775                 }
1776                 res
1777         }
1778
1779         /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1780         /// pending HTLCs that are on track to be in their next commitment tx
1781         ///
1782         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1783         /// `fee_spike_buffer_htlc` is `Some`.
1784         ///
1785         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1786         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1787         ///
1788         /// Dust HTLCs are excluded.
1789         fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1790                 let context = &self;
1791                 assert!(!context.is_outbound());
1792
1793                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
1794                         (0, 0)
1795                 } else {
1796                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
1797                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
1798                 };
1799                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1800                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1801
1802                 let mut addl_htlcs = 0;
1803                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1804                 match htlc.origin {
1805                         HTLCInitiator::LocalOffered => {
1806                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1807                                         addl_htlcs += 1;
1808                                 }
1809                         },
1810                         HTLCInitiator::RemoteOffered => {
1811                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1812                                         addl_htlcs += 1;
1813                                 }
1814                         }
1815                 }
1816
1817                 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
1818                 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
1819                 // committed outbound HTLCs, see below.
1820                 let mut included_htlcs = 0;
1821                 for ref htlc in context.pending_inbound_htlcs.iter() {
1822                         if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
1823                                 continue
1824                         }
1825                         included_htlcs += 1;
1826                 }
1827
1828                 for ref htlc in context.pending_outbound_htlcs.iter() {
1829                         if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
1830                                 continue
1831                         }
1832                         // We only include outbound HTLCs if it will not be included in their next commitment_signed,
1833                         // i.e. if they've responded to us with an RAA after announcement.
1834                         match htlc.state {
1835                                 OutboundHTLCState::Committed => included_htlcs += 1,
1836                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1837                                 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
1838                                 _ => {},
1839                         }
1840                 }
1841
1842                 let num_htlcs = included_htlcs + addl_htlcs;
1843                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
1844                 #[cfg(any(test, fuzzing))]
1845                 {
1846                         let mut fee = res;
1847                         if fee_spike_buffer_htlc.is_some() {
1848                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
1849                         }
1850                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
1851                         let commitment_tx_info = CommitmentTxInfoCached {
1852                                 fee,
1853                                 total_pending_htlcs,
1854                                 next_holder_htlc_id: match htlc.origin {
1855                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1856                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1857                                 },
1858                                 next_counterparty_htlc_id: match htlc.origin {
1859                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1860                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1861                                 },
1862                                 feerate: context.feerate_per_kw,
1863                         };
1864                         *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1865                 }
1866                 res
1867         }
1868
1869         /// Returns transaction if there is pending funding transaction that is yet to broadcast
1870         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
1871                 if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
1872                         self.funding_transaction.clone()
1873                 } else {
1874                         None
1875                 }
1876         }
1877 }
1878
1879 // Internal utility functions for channels
1880
1881 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
1882 /// `channel_value_satoshis` in msat, set through
1883 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
1884 ///
1885 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
1886 ///
1887 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
1888 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
1889         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
1890                 1
1891         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
1892                 100
1893         } else {
1894                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
1895         };
1896         channel_value_satoshis * 10 * configured_percent
1897 }
1898
1899 /// Returns a minimum channel reserve value the remote needs to maintain,
1900 /// required by us according to the configured or default
1901 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
1902 ///
1903 /// Guaranteed to return a value no larger than channel_value_satoshis
1904 ///
1905 /// This is used both for outbound and inbound channels and has lower bound
1906 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
1907 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
1908         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
1909         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
1910 }
1911
1912 /// This is for legacy reasons, present for forward-compatibility.
1913 /// LDK versions older than 0.0.104 don't know how read/handle values other than default
1914 /// from storage. Hence, we use this function to not persist default values of
1915 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
1916 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
1917         let (q, _) = channel_value_satoshis.overflowing_div(100);
1918         cmp::min(channel_value_satoshis, cmp::max(q, 1000))
1919 }
1920
1921 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
1922 // Note that num_htlcs should not include dust HTLCs.
1923 #[inline]
1924 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
1925         feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
1926 }
1927
1928 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
1929 // Note that num_htlcs should not include dust HTLCs.
1930 fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
1931         // Note that we need to divide before multiplying to round properly,
1932         // since the lowest denomination of bitcoin on-chain is the satoshi.
1933         (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
1934 }
1935
1936 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
1937 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
1938 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
1939 // inbound channel.
1940 //
1941 // Holder designates channel data owned for the benefit of the user client.
1942 // Counterparty designates channel data owned by the another channel participant entity.
1943 pub(super) struct Channel<Signer: ChannelSigner> {
1944         pub context: ChannelContext<Signer>,
1945 }
1946
1947 #[cfg(any(test, fuzzing))]
1948 struct CommitmentTxInfoCached {
1949         fee: u64,
1950         total_pending_htlcs: usize,
1951         next_holder_htlc_id: u64,
1952         next_counterparty_htlc_id: u64,
1953         feerate: u32,
1954 }
1955
1956 impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
1957         /// If we receive an error message, it may only be a rejection of the channel type we tried,
1958         /// not of our ability to open any channel at all. Thus, on error, we should first call this
1959         /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
1960         pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
1961                 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
1962                 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
1963                         // We've exhausted our options
1964                         return Err(());
1965                 }
1966                 // We support opening a few different types of channels. Try removing our additional
1967                 // features one by one until we've either arrived at our default or the counterparty has
1968                 // accepted one.
1969                 //
1970                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
1971                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
1972                 // checks whether the counterparty supports every feature, this would only happen if the
1973                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
1974                 // whatever reason.
1975                 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
1976                         self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
1977                         assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
1978                         self.context.channel_transaction_parameters.opt_anchors = None;
1979                 } else if self.context.channel_type.supports_scid_privacy() {
1980                         self.context.channel_type.clear_scid_privacy();
1981                 } else {
1982                         self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
1983                 }
1984                 Ok(self.get_open_channel(chain_hash))
1985         }
1986
1987         // Constructors:
1988
1989         fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
1990                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
1991                 -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
1992         {
1993                 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
1994                 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
1995                 // We generally don't care too much if they set the feerate to something very high, but it
1996                 // could result in the channel being useless due to everything being dust.
1997                 let upper_limit = cmp::max(250 * 25,
1998                         fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
1999                 if feerate_per_kw as u64 > upper_limit {
2000                         return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2001                 }
2002                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
2003                 // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
2004                 // occasional issues with feerate disagreements between an initiator that wants a feerate
2005                 // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
2006                 // sat/kw before the comparison here.
2007                 if feerate_per_kw + 250 < lower_limit {
2008                         if let Some(cur_feerate) = cur_feerate_per_kw {
2009                                 if feerate_per_kw > cur_feerate {
2010                                         log_warn!(logger,
2011                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2012                                                 cur_feerate, feerate_per_kw);
2013                                         return Ok(());
2014                                 }
2015                         }
2016                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
2017                 }
2018                 Ok(())
2019         }
2020
2021         /// Creates a new channel from a remote sides' request for one.
2022         /// Assumes chain_hash has already been checked and corresponds with what we expect!
2023         pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
2024                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
2025                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
2026                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
2027                 current_chain_height: u32, logger: &L, outbound_scid_alias: u64
2028         ) -> Result<Channel<Signer>, ChannelError>
2029                 where ES::Target: EntropySource,
2030                           SP::Target: SignerProvider<Signer = Signer>,
2031                           F::Target: FeeEstimator,
2032                           L::Target: Logger,
2033         {
2034                 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
2035
2036                 // First check the channel type is known, failing before we do anything else if we don't
2037                 // support this channel type.
2038                 let channel_type = if let Some(channel_type) = &msg.channel_type {
2039                         if channel_type.supports_any_optional_bits() {
2040                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
2041                         }
2042
2043                         // We only support the channel types defined by the `ChannelManager` in
2044                         // `provided_channel_type_features`. The channel type must always support
2045                         // `static_remote_key`.
2046                         if !channel_type.requires_static_remote_key() {
2047                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
2048                         }
2049                         // Make sure we support all of the features behind the channel type.
2050                         if !channel_type.is_subset(our_supported_features) {
2051                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
2052                         }
2053                         if channel_type.requires_scid_privacy() && announced_channel {
2054                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
2055                         }
2056                         channel_type.clone()
2057                 } else {
2058                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
2059                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
2060                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
2061                         }
2062                         channel_type
2063                 };
2064                 let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
2065
2066                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
2067                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
2068                 let pubkeys = holder_signer.pubkeys().clone();
2069                 let counterparty_pubkeys = ChannelPublicKeys {
2070                         funding_pubkey: msg.funding_pubkey,
2071                         revocation_basepoint: msg.revocation_basepoint,
2072                         payment_point: msg.payment_point,
2073                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
2074                         htlc_basepoint: msg.htlc_basepoint
2075                 };
2076
2077                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
2078                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
2079                 }
2080
2081                 // Check sanity of message fields:
2082                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
2083                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
2084                 }
2085                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
2086                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
2087                 }
2088                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
2089                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
2090                 }
2091                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
2092                 if msg.push_msat > full_channel_value_msat {
2093                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
2094                 }
2095                 if msg.dust_limit_satoshis > msg.funding_satoshis {
2096                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
2097                 }
2098                 if msg.htlc_minimum_msat >= full_channel_value_msat {
2099                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
2100                 }
2101                 Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
2102
2103                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
2104                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
2105                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
2106                 }
2107                 if msg.max_accepted_htlcs < 1 {
2108                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
2109                 }
2110                 if msg.max_accepted_htlcs > MAX_HTLCS {
2111                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
2112                 }
2113
2114                 // Now check against optional parameters as set by config...
2115                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
2116                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
2117                 }
2118                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
2119                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
2120                 }
2121                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
2122                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
2123                 }
2124                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
2125                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
2126                 }
2127                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
2128                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
2129                 }
2130                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
2131                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
2132                 }
2133                 if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
2134                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
2135                 }
2136
2137                 // Convert things into internal flags and prep our state:
2138
2139                 if config.channel_handshake_limits.force_announced_channel_preference {
2140                         if config.channel_handshake_config.announced_channel != announced_channel {
2141                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
2142                         }
2143                 }
2144
2145                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
2146                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
2147                         // Protocol level safety check in place, although it should never happen because
2148                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
2149                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
2150                 }
2151                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
2152                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
2153                 }
2154                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
2155                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
2156                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
2157                 }
2158                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
2159                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
2160                 }
2161
2162                 // check if the funder's amount for the initial commitment tx is sufficient
2163                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
2164                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
2165                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
2166                 if funders_amount_msat / 1000 < commitment_tx_fee {
2167                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
2168                 }
2169
2170                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
2171                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
2172                 // want to push much to us), our counterparty should always have more than our reserve.
2173                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
2174                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
2175                 }
2176
2177                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
2178                         match &msg.shutdown_scriptpubkey {
2179                                 &Some(ref script) => {
2180                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
2181                                         if script.len() == 0 {
2182                                                 None
2183                                         } else {
2184                                                 if !script::is_bolt2_compliant(&script, their_features) {
2185                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
2186                                                 }
2187                                                 Some(script.clone())
2188                                         }
2189                                 },
2190                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
2191                                 &None => {
2192                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
2193                                 }
2194                         }
2195                 } else { None };
2196
2197                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
2198                         match signer_provider.get_shutdown_scriptpubkey() {
2199                                 Ok(scriptpubkey) => Some(scriptpubkey),
2200                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
2201                         }
2202                 } else { None };
2203
2204                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
2205                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
2206                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
2207                         }
2208                 }
2209
2210                 let destination_script = match signer_provider.get_destination_script() {
2211                         Ok(script) => script,
2212                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
2213                 };
2214
2215                 let mut secp_ctx = Secp256k1::new();
2216                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
2217
2218                 let chan = Channel {
2219                         context: ChannelContext {
2220                                 user_id,
2221
2222                                 config: LegacyChannelConfig {
2223                                         options: config.channel_config.clone(),
2224                                         announced_channel,
2225                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
2226                                 },
2227
2228                                 prev_config: None,
2229
2230                                 inbound_handshake_limits_override: None,
2231
2232                                 temporary_channel_id: Some(msg.temporary_channel_id),
2233                                 channel_id: msg.temporary_channel_id,
2234                                 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
2235                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
2236                                 secp_ctx,
2237
2238                                 latest_monitor_update_id: 0,
2239
2240                                 holder_signer,
2241                                 shutdown_scriptpubkey,
2242                                 destination_script,
2243
2244                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
2245                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
2246                                 value_to_self_msat: msg.push_msat,
2247
2248                                 pending_inbound_htlcs: Vec::new(),
2249                                 pending_outbound_htlcs: Vec::new(),
2250                                 holding_cell_htlc_updates: Vec::new(),
2251                                 pending_update_fee: None,
2252                                 holding_cell_update_fee: None,
2253                                 next_holder_htlc_id: 0,
2254                                 next_counterparty_htlc_id: 0,
2255                                 update_time_counter: 1,
2256
2257                                 resend_order: RAACommitmentOrder::CommitmentFirst,
2258
2259                                 monitor_pending_channel_ready: false,
2260                                 monitor_pending_revoke_and_ack: false,
2261                                 monitor_pending_commitment_signed: false,
2262                                 monitor_pending_forwards: Vec::new(),
2263                                 monitor_pending_failures: Vec::new(),
2264                                 monitor_pending_finalized_fulfills: Vec::new(),
2265
2266                                 #[cfg(debug_assertions)]
2267                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
2268                                 #[cfg(debug_assertions)]
2269                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
2270
2271                                 last_sent_closing_fee: None,
2272                                 pending_counterparty_closing_signed: None,
2273                                 closing_fee_limits: None,
2274                                 target_closing_feerate_sats_per_kw: None,
2275
2276                                 inbound_awaiting_accept: true,
2277
2278                                 funding_tx_confirmed_in: None,
2279                                 funding_tx_confirmation_height: 0,
2280                                 short_channel_id: None,
2281                                 channel_creation_height: current_chain_height,
2282
2283                                 feerate_per_kw: msg.feerate_per_kw,
2284                                 channel_value_satoshis: msg.funding_satoshis,
2285                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
2286                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
2287                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
2288                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
2289                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
2290                                 holder_selected_channel_reserve_satoshis,
2291                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
2292                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2293                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
2294                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2295                                 minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
2296
2297                                 counterparty_forwarding_info: None,
2298
2299                                 channel_transaction_parameters: ChannelTransactionParameters {
2300                                         holder_pubkeys: pubkeys,
2301                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2302                                         is_outbound_from_holder: false,
2303                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
2304                                                 selected_contest_delay: msg.to_self_delay,
2305                                                 pubkeys: counterparty_pubkeys,
2306                                         }),
2307                                         funding_outpoint: None,
2308                                         opt_anchors: if opt_anchors { Some(()) } else { None },
2309                                         opt_non_zero_fee_anchors: None
2310                                 },
2311                                 funding_transaction: None,
2312
2313                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
2314                                 counterparty_prev_commitment_point: None,
2315                                 counterparty_node_id,
2316
2317                                 counterparty_shutdown_scriptpubkey,
2318
2319                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
2320
2321                                 channel_update_status: ChannelUpdateStatus::Enabled,
2322                                 closing_signed_in_flight: false,
2323
2324                                 announcement_sigs: None,
2325
2326                                 #[cfg(any(test, fuzzing))]
2327                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2328                                 #[cfg(any(test, fuzzing))]
2329                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2330
2331                                 workaround_lnd_bug_4006: None,
2332                                 sent_message_awaiting_response: None,
2333
2334                                 latest_inbound_scid_alias: None,
2335                                 outbound_scid_alias,
2336
2337                                 channel_pending_event_emitted: false,
2338                                 channel_ready_event_emitted: false,
2339
2340                                 #[cfg(any(test, fuzzing))]
2341                                 historical_inbound_htlc_fulfills: HashSet::new(),
2342
2343                                 channel_type,
2344                                 channel_keys_id,
2345
2346                                 pending_monitor_updates: Vec::new(),
2347                         }
2348                 };
2349
2350                 Ok(chan)
2351         }
2352
2353         #[inline]
2354         fn get_closing_scriptpubkey(&self) -> Script {
2355                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2356                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2357                 // outside of those situations will fail.
2358                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2359         }
2360
2361         #[inline]
2362         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2363                 let mut ret =
2364                 (4 +                                                   // version
2365                  1 +                                                   // input count
2366                  36 +                                                  // prevout
2367                  1 +                                                   // script length (0)
2368                  4 +                                                   // sequence
2369                  1 +                                                   // output count
2370                  4                                                     // lock time
2371                  )*4 +                                                 // * 4 for non-witness parts
2372                 2 +                                                    // witness marker and flag
2373                 1 +                                                    // witness element count
2374                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2375                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2376                 2*(1 + 71);                                            // two signatures + sighash type flags
2377                 if let Some(spk) = a_scriptpubkey {
2378                         ret += ((8+1) +                                    // output values and script length
2379                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2380                 }
2381                 if let Some(spk) = b_scriptpubkey {
2382                         ret += ((8+1) +                                    // output values and script length
2383                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2384                 }
2385                 ret
2386         }
2387
2388         #[inline]
2389         fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2390                 assert!(self.context.pending_inbound_htlcs.is_empty());
2391                 assert!(self.context.pending_outbound_htlcs.is_empty());
2392                 assert!(self.context.pending_update_fee.is_none());
2393
2394                 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2395                 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2396                 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2397
2398                 if value_to_holder < 0 {
2399                         assert!(self.context.is_outbound());
2400                         total_fee_satoshis += (-value_to_holder) as u64;
2401                 } else if value_to_counterparty < 0 {
2402                         assert!(!self.context.is_outbound());
2403                         total_fee_satoshis += (-value_to_counterparty) as u64;
2404                 }
2405
2406                 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2407                         value_to_counterparty = 0;
2408                 }
2409
2410                 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2411                         value_to_holder = 0;
2412                 }
2413
2414                 assert!(self.context.shutdown_scriptpubkey.is_some());
2415                 let holder_shutdown_script = self.get_closing_scriptpubkey();
2416                 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2417                 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2418
2419                 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2420                 (closing_transaction, total_fee_satoshis)
2421         }
2422
2423         fn funding_outpoint(&self) -> OutPoint {
2424                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2425         }
2426
2427         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2428         /// entirely.
2429         ///
2430         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2431         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2432         ///
2433         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2434         /// disconnected).
2435         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2436                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2437         where L::Target: Logger {
2438                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2439                 // (see equivalent if condition there).
2440                 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2441                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2442                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2443                 self.context.latest_monitor_update_id = mon_update_id;
2444                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2445                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2446                 }
2447         }
2448
2449         fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2450                 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2451                 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2452                 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2453                 // either.
2454                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2455                         panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2456                 }
2457                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2458
2459                 let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
2460
2461                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2462                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2463                 // these, but for now we just have to treat them as normal.
2464
2465                 let mut pending_idx = core::usize::MAX;
2466                 let mut htlc_value_msat = 0;
2467                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2468                         if htlc.htlc_id == htlc_id_arg {
2469                                 assert_eq!(htlc.payment_hash, payment_hash_calc);
2470                                 match htlc.state {
2471                                         InboundHTLCState::Committed => {},
2472                                         InboundHTLCState::LocalRemoved(ref reason) => {
2473                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2474                                                 } else {
2475                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
2476                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2477                                                 }
2478                                                 return UpdateFulfillFetch::DuplicateClaim {};
2479                                         },
2480                                         _ => {
2481                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2482                                                 // Don't return in release mode here so that we can update channel_monitor
2483                                         }
2484                                 }
2485                                 pending_idx = idx;
2486                                 htlc_value_msat = htlc.amount_msat;
2487                                 break;
2488                         }
2489                 }
2490                 if pending_idx == core::usize::MAX {
2491                         #[cfg(any(test, fuzzing))]
2492                         // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2493                         // this is simply a duplicate claim, not previously failed and we lost funds.
2494                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2495                         return UpdateFulfillFetch::DuplicateClaim {};
2496                 }
2497
2498                 // Now update local state:
2499                 //
2500                 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2501                 // can claim it even if the channel hits the chain before we see their next commitment.
2502                 self.context.latest_monitor_update_id += 1;
2503                 let monitor_update = ChannelMonitorUpdate {
2504                         update_id: self.context.latest_monitor_update_id,
2505                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2506                                 payment_preimage: payment_preimage_arg.clone(),
2507                         }],
2508                 };
2509
2510                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2511                         // Note that this condition is the same as the assertion in
2512                         // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2513                         // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2514                         // do not not get into this branch.
2515                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2516                                 match pending_update {
2517                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2518                                                 if htlc_id_arg == htlc_id {
2519                                                         // Make sure we don't leave latest_monitor_update_id incremented here:
2520                                                         self.context.latest_monitor_update_id -= 1;
2521                                                         #[cfg(any(test, fuzzing))]
2522                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2523                                                         return UpdateFulfillFetch::DuplicateClaim {};
2524                                                 }
2525                                         },
2526                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2527                                                 if htlc_id_arg == htlc_id {
2528                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
2529                                                         // TODO: We may actually be able to switch to a fulfill here, though its
2530                                                         // rare enough it may not be worth the complexity burden.
2531                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2532                                                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2533                                                 }
2534                                         },
2535                                         _ => {}
2536                                 }
2537                         }
2538                         log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
2539                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2540                                 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2541                         });
2542                         #[cfg(any(test, fuzzing))]
2543                         self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2544                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2545                 }
2546                 #[cfg(any(test, fuzzing))]
2547                 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2548
2549                 {
2550                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2551                         if let InboundHTLCState::Committed = htlc.state {
2552                         } else {
2553                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2554                                 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2555                         }
2556                         log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
2557                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2558                 }
2559
2560                 UpdateFulfillFetch::NewClaim {
2561                         monitor_update,
2562                         htlc_value_msat,
2563                         msg: Some(msgs::UpdateFulfillHTLC {
2564                                 channel_id: self.context.channel_id(),
2565                                 htlc_id: htlc_id_arg,
2566                                 payment_preimage: payment_preimage_arg,
2567                         }),
2568                 }
2569         }
2570
2571         pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2572                 let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
2573                 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2574                         UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2575                                 // Even if we aren't supposed to let new monitor updates with commitment state
2576                                 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2577                                 // matter what. Sadly, to push a new monitor update which flies before others
2578                                 // already queued, we have to insert it into the pending queue and update the
2579                                 // update_ids of all the following monitors.
2580                                 let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
2581                                         let mut additional_update = self.build_commitment_no_status_check(logger);
2582                                         // build_commitment_no_status_check may bump latest_monitor_id but we want them
2583                                         // to be strictly increasing by one, so decrement it here.
2584                                         self.context.latest_monitor_update_id = monitor_update.update_id;
2585                                         monitor_update.updates.append(&mut additional_update.updates);
2586                                         self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
2587                                                 update: monitor_update, blocked: false,
2588                                         });
2589                                         self.context.pending_monitor_updates.len() - 1
2590                                 } else {
2591                                         let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
2592                                                 .unwrap_or(self.context.pending_monitor_updates.len());
2593                                         let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
2594                                                 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2595                                         monitor_update.update_id = new_mon_id;
2596                                         self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
2597                                                 update: monitor_update, blocked: false,
2598                                         });
2599                                         for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
2600                                                 held_update.update.update_id += 1;
2601                                         }
2602                                         if msg.is_some() {
2603                                                 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2604                                                 let update = self.build_commitment_no_status_check(logger);
2605                                                 self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
2606                                                         update, blocked: true,
2607                                                 });
2608                                         }
2609                                         insert_pos
2610                                 };
2611                                 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2612                                 UpdateFulfillCommitFetch::NewClaim {
2613                                         monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
2614                                                 .expect("We just pushed the monitor update").update,
2615                                         htlc_value_msat,
2616                                 }
2617                         },
2618                         UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2619                 }
2620         }
2621
2622         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2623         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2624         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2625         /// before we fail backwards.
2626         ///
2627         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2628         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2629         /// [`ChannelError::Ignore`].
2630         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2631         -> Result<(), ChannelError> where L::Target: Logger {
2632                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2633                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2634         }
2635
2636         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2637         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2638         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2639         /// before we fail backwards.
2640         ///
2641         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2642         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2643         /// [`ChannelError::Ignore`].
2644         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2645         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2646                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2647                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2648                 }
2649                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2650
2651                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2652                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2653                 // these, but for now we just have to treat them as normal.
2654
2655                 let mut pending_idx = core::usize::MAX;
2656                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2657                         if htlc.htlc_id == htlc_id_arg {
2658                                 match htlc.state {
2659                                         InboundHTLCState::Committed => {},
2660                                         InboundHTLCState::LocalRemoved(ref reason) => {
2661                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2662                                                 } else {
2663                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2664                                                 }
2665                                                 return Ok(None);
2666                                         },
2667                                         _ => {
2668                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2669                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2670                                         }
2671                                 }
2672                                 pending_idx = idx;
2673                         }
2674                 }
2675                 if pending_idx == core::usize::MAX {
2676                         #[cfg(any(test, fuzzing))]
2677                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2678                         // is simply a duplicate fail, not previously failed and we failed-back too early.
2679                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2680                         return Ok(None);
2681                 }
2682
2683                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2684                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2685                         force_holding_cell = true;
2686                 }
2687
2688                 // Now update local state:
2689                 if force_holding_cell {
2690                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2691                                 match pending_update {
2692                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2693                                                 if htlc_id_arg == htlc_id {
2694                                                         #[cfg(any(test, fuzzing))]
2695                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2696                                                         return Ok(None);
2697                                                 }
2698                                         },
2699                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2700                                                 if htlc_id_arg == htlc_id {
2701                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2702                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2703                                                 }
2704                                         },
2705                                         _ => {}
2706                                 }
2707                         }
2708                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
2709                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2710                                 htlc_id: htlc_id_arg,
2711                                 err_packet,
2712                         });
2713                         return Ok(None);
2714                 }
2715
2716                 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
2717                 {
2718                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2719                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2720                 }
2721
2722                 Ok(Some(msgs::UpdateFailHTLC {
2723                         channel_id: self.context.channel_id(),
2724                         htlc_id: htlc_id_arg,
2725                         reason: err_packet
2726                 }))
2727         }
2728
2729         // Message handlers:
2730
2731         pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
2732                 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
2733
2734                 // Check sanity of message fields:
2735                 if !self.context.is_outbound() {
2736                         return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
2737                 }
2738                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
2739                         return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
2740                 }
2741                 if msg.dust_limit_satoshis > 21000000 * 100000000 {
2742                         return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
2743                 }
2744                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
2745                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
2746                 }
2747                 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
2748                         return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
2749                 }
2750                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
2751                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
2752                                 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
2753                 }
2754                 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
2755                 if msg.htlc_minimum_msat >= full_channel_value_msat {
2756                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
2757                 }
2758                 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
2759                 if msg.to_self_delay > max_delay_acceptable {
2760                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
2761                 }
2762                 if msg.max_accepted_htlcs < 1 {
2763                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
2764                 }
2765                 if msg.max_accepted_htlcs > MAX_HTLCS {
2766                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
2767                 }
2768
2769                 // Now check against optional parameters as set by config...
2770                 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
2771                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
2772                 }
2773                 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
2774                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
2775                 }
2776                 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
2777                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
2778                 }
2779                 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
2780                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
2781                 }
2782                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
2783                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
2784                 }
2785                 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
2786                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
2787                 }
2788                 if msg.minimum_depth > peer_limits.max_minimum_depth {
2789                         return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
2790                 }
2791
2792                 if let Some(ty) = &msg.channel_type {
2793                         if *ty != self.context.channel_type {
2794                                 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
2795                         }
2796                 } else if their_features.supports_channel_type() {
2797                         // Assume they've accepted the channel type as they said they understand it.
2798                 } else {
2799                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
2800                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
2801                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
2802                         }
2803                         self.context.channel_type = channel_type;
2804                 }
2805
2806                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
2807                         match &msg.shutdown_scriptpubkey {
2808                                 &Some(ref script) => {
2809                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
2810                                         if script.len() == 0 {
2811                                                 None
2812                                         } else {
2813                                                 if !script::is_bolt2_compliant(&script, their_features) {
2814                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
2815                                                 }
2816                                                 Some(script.clone())
2817                                         }
2818                                 },
2819                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
2820                                 &None => {
2821                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
2822                                 }
2823                         }
2824                 } else { None };
2825
2826                 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
2827                 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
2828                 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
2829                 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
2830                 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
2831
2832                 if peer_limits.trust_own_funding_0conf {
2833                         self.context.minimum_depth = Some(msg.minimum_depth);
2834                 } else {
2835                         self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
2836                 }
2837
2838                 let counterparty_pubkeys = ChannelPublicKeys {
2839                         funding_pubkey: msg.funding_pubkey,
2840                         revocation_basepoint: msg.revocation_basepoint,
2841                         payment_point: msg.payment_point,
2842                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
2843                         htlc_basepoint: msg.htlc_basepoint
2844                 };
2845
2846                 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
2847                         selected_contest_delay: msg.to_self_delay,
2848                         pubkeys: counterparty_pubkeys,
2849                 });
2850
2851                 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
2852                 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
2853
2854                 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
2855                 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
2856
2857                 Ok(())
2858         }
2859
2860         fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
2861                 let funding_script = self.context.get_funding_redeemscript();
2862
2863                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2864                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
2865                 {
2866                         let trusted_tx = initial_commitment_tx.trust();
2867                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2868                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2869                         // They sign the holder commitment transaction...
2870                         log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
2871                                 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
2872                                 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
2873                                 encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
2874                         secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
2875                 }
2876
2877                 let counterparty_keys = self.context.build_remote_transaction_keys();
2878                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2879
2880                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2881                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2882                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2883                         log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2884
2885                 let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
2886                                 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
2887
2888                 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2889                 Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
2890         }
2891
2892         pub fn funding_created<SP: Deref, L: Deref>(
2893                 &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
2894         ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
2895         where
2896                 SP::Target: SignerProvider<Signer = Signer>,
2897                 L::Target: Logger
2898         {
2899                 if self.context.is_outbound() {
2900                         return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
2901                 }
2902                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
2903                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
2904                         // remember the channel, so it's safe to just send an error_message here and drop the
2905                         // channel.
2906                         return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
2907                 }
2908                 if self.context.inbound_awaiting_accept {
2909                         return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
2910                 }
2911                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2912                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2913                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2914                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2915                 }
2916
2917                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
2918                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
2919                 // This is an externally observable change before we finish all our checks.  In particular
2920                 // funding_created_signature may fail.
2921                 self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2922
2923                 let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
2924                         Ok(res) => res,
2925                         Err(ChannelError::Close(e)) => {
2926                                 self.context.channel_transaction_parameters.funding_outpoint = None;
2927                                 return Err(ChannelError::Close(e));
2928                         },
2929                         Err(e) => {
2930                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
2931                                 // to make sure we don't continue with an inconsistent state.
2932                                 panic!("unexpected error type from funding_created_signature {:?}", e);
2933                         }
2934                 };
2935
2936                 let holder_commitment_tx = HolderCommitmentTransaction::new(
2937                         initial_commitment_tx,
2938                         msg.signature,
2939                         Vec::new(),
2940                         &self.context.get_holder_pubkeys().funding_pubkey,
2941                         self.context.counterparty_funding_pubkey()
2942                 );
2943
2944                 self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
2945                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2946
2947                         // Now that we're past error-generating stuff, update our local state:
2948
2949                 let funding_redeemscript = self.context.get_funding_redeemscript();
2950                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2951                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2952                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2953                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2954                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2955                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2956                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
2957                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
2958                                                           &self.context.channel_transaction_parameters,
2959                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
2960                                                           obscure_factor,
2961                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
2962
2963                 channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
2964
2965                 self.context.channel_state = ChannelState::FundingSent as u32;
2966                 self.context.channel_id = funding_txo.to_channel_id();
2967                 self.context.cur_counterparty_commitment_transaction_number -= 1;
2968                 self.context.cur_holder_commitment_transaction_number -= 1;
2969
2970                 log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
2971
2972                 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2973                 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2974
2975                 Ok((msgs::FundingSigned {
2976                         channel_id: self.context.channel_id,
2977                         signature,
2978                         #[cfg(taproot)]
2979                         partial_signature_with_nonce: None,
2980                 }, channel_monitor))
2981         }
2982
2983         /// Handles a funding_signed message from the remote end.
2984         /// If this call is successful, broadcast the funding transaction (and not before!)
2985         pub fn funding_signed<SP: Deref, L: Deref>(
2986                 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2987         ) -> Result<ChannelMonitor<Signer>, ChannelError>
2988         where
2989                 SP::Target: SignerProvider<Signer = Signer>,
2990                 L::Target: Logger
2991         {
2992                 if !self.context.is_outbound() {
2993                         return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2994                 }
2995                 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2996                         return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2997                 }
2998                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2999                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
3000                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
3001                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
3002                 }
3003
3004                 let funding_script = self.context.get_funding_redeemscript();
3005
3006                 let counterparty_keys = self.context.build_remote_transaction_keys();
3007                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
3008                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3009                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3010
3011                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3012                         log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3013
3014                 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3015                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
3016                 {
3017                         let trusted_tx = initial_commitment_tx.trust();
3018                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
3019                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3020                         // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
3021                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
3022                                 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
3023                         }
3024                 }
3025
3026                 let holder_commitment_tx = HolderCommitmentTransaction::new(
3027                         initial_commitment_tx,
3028                         msg.signature,
3029                         Vec::new(),
3030                         &self.context.get_holder_pubkeys().funding_pubkey,
3031                         self.context.counterparty_funding_pubkey()
3032                 );
3033
3034                 self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
3035                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3036
3037
3038                 let funding_redeemscript = self.context.get_funding_redeemscript();
3039                 let funding_txo = self.context.get_funding_txo().unwrap();
3040                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
3041                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
3042                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
3043                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
3044                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
3045                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
3046                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
3047                                                           &self.context.destination_script, (funding_txo, funding_txo_script),
3048                                                           &self.context.channel_transaction_parameters,
3049                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
3050                                                           obscure_factor,
3051                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
3052
3053                 channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
3054
3055                 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
3056                 self.context.channel_state = ChannelState::FundingSent as u32;
3057                 self.context.cur_holder_commitment_transaction_number -= 1;
3058                 self.context.cur_counterparty_commitment_transaction_number -= 1;
3059
3060                 log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
3061
3062                 let need_channel_ready = self.check_get_channel_ready(0).is_some();
3063                 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
3064                 Ok(channel_monitor)
3065         }
3066
3067         /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3068         /// and the channel is now usable (and public), this may generate an announcement_signatures to
3069         /// reply with.
3070         pub fn channel_ready<NS: Deref, L: Deref>(
3071                 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
3072                 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3073         ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3074         where
3075                 NS::Target: NodeSigner,
3076                 L::Target: Logger
3077         {
3078                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3079                         self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3080                         return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3081                 }
3082
3083                 if let Some(scid_alias) = msg.short_channel_id_alias {
3084                         if Some(scid_alias) != self.context.short_channel_id {
3085                                 // The scid alias provided can be used to route payments *from* our counterparty,
3086                                 // i.e. can be used for inbound payments and provided in invoices, but is not used
3087                                 // when routing outbound payments.
3088                                 self.context.latest_inbound_scid_alias = Some(scid_alias);
3089                         }
3090                 }
3091
3092                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
3093
3094                 if non_shutdown_state == ChannelState::FundingSent as u32 {
3095                         self.context.channel_state |= ChannelState::TheirChannelReady as u32;
3096                 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
3097                         self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
3098                         self.context.update_time_counter += 1;
3099                 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
3100                         // If we reconnected before sending our `channel_ready` they may still resend theirs:
3101                         (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
3102                                               (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
3103                 {
3104                         // They probably disconnected/reconnected and re-sent the channel_ready, which is
3105                         // required, or they're sending a fresh SCID alias.
3106                         let expected_point =
3107                                 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3108                                         // If they haven't ever sent an updated point, the point they send should match
3109                                         // the current one.
3110                                         self.context.counterparty_cur_commitment_point
3111                                 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3112                                         // If we've advanced the commitment number once, the second commitment point is
3113                                         // at `counterparty_prev_commitment_point`, which is not yet revoked.
3114                                         debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3115                                         self.context.counterparty_prev_commitment_point
3116                                 } else {
3117                                         // If they have sent updated points, channel_ready is always supposed to match
3118                                         // their "first" point, which we re-derive here.
3119                                         Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3120                                                         &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3121                                                 ).expect("We already advanced, so previous secret keys should have been validated already")))
3122                                 };
3123                         if expected_point != Some(msg.next_per_commitment_point) {
3124                                 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3125                         }
3126                         return Ok(None);
3127                 } else {
3128                         return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
3129                 }
3130
3131                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3132                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3133
3134                 log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
3135
3136                 Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
3137         }
3138
3139         pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
3140         where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
3141                 // We can't accept HTLCs sent after we've sent a shutdown.
3142                 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
3143                 if local_sent_shutdown {
3144                         pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3145                 }
3146                 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3147                 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
3148                 if remote_sent_shutdown {
3149                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3150                 }
3151                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3152                         return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3153                 }
3154                 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3155                         return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3156                 }
3157                 if msg.amount_msat == 0 {
3158                         return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3159                 }
3160                 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3161                         return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3162                 }
3163
3164                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3165                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3166                 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3167                         return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3168                 }
3169                 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3170                         return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3171                 }
3172                 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3173                 // the reserve_satoshis we told them to always have as direct payment so that they lose
3174                 // something if we punish them for broadcasting an old state).
3175                 // Note that we don't really care about having a small/no to_remote output in our local
3176                 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3177                 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3178                 // present in the next commitment transaction we send them (at least for fulfilled ones,
3179                 // failed ones won't modify value_to_self).
3180                 // Note that we will send HTLCs which another instance of rust-lightning would think
3181                 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3182                 // Channel state once they will not be present in the next received commitment
3183                 // transaction).
3184                 let mut removed_outbound_total_msat = 0;
3185                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3186                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3187                                 removed_outbound_total_msat += htlc.amount_msat;
3188                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3189                                 removed_outbound_total_msat += htlc.amount_msat;
3190                         }
3191                 }
3192
3193                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
3194                         (0, 0)
3195                 } else {
3196                         let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3197                         (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
3198                                 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
3199                 };
3200                 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3201                 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3202                         let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3203                         if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
3204                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3205                                         on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
3206                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3207                         }
3208                 }
3209
3210                 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3211                 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3212                         let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3213                         if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
3214                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3215                                         on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
3216                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3217                         }
3218                 }
3219
3220                 let pending_value_to_self_msat =
3221                         self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3222                 let pending_remote_value_msat =
3223                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3224                 if pending_remote_value_msat < msg.amount_msat {
3225                         return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3226                 }
3227
3228                 // Check that the remote can afford to pay for this HTLC on-chain at the current
3229                 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3230                 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3231                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3232                         self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3233                 };
3234                 if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
3235                         return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3236                 };
3237
3238                 if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3239                         return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3240                 }
3241
3242                 if !self.context.is_outbound() {
3243                         // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3244                         // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
3245                         // receiver's side, only on the sender's.
3246                         // Note that when we eventually remove support for fee updates and switch to anchor output
3247                         // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
3248                         // the extra htlc when calculating the next remote commitment transaction fee as we should
3249                         // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
3250                         // sensitive to fee spikes.
3251                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3252                         let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3253                         if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
3254                                 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3255                                 // the HTLC, i.e. its status is already set to failing.
3256                                 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
3257                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3258                         }
3259                 } else {
3260                         // Check that they won't violate our local required channel reserve by adding this HTLC.
3261                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3262                         let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3263                         if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
3264                                 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3265                         }
3266                 }
3267                 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3268                         return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3269                 }
3270                 if msg.cltv_expiry >= 500000000 {
3271                         return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3272                 }
3273
3274                 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3275                         if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3276                                 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3277                         }
3278                 }
3279
3280                 // Now update local state:
3281                 self.context.next_counterparty_htlc_id += 1;
3282                 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3283                         htlc_id: msg.htlc_id,
3284                         amount_msat: msg.amount_msat,
3285                         payment_hash: msg.payment_hash,
3286                         cltv_expiry: msg.cltv_expiry,
3287                         state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3288                 });
3289                 Ok(())
3290         }
3291
3292         /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3293         #[inline]
3294         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3295                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3296                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3297                         if htlc.htlc_id == htlc_id {
3298                                 let outcome = match check_preimage {
3299                                         None => fail_reason.into(),
3300                                         Some(payment_preimage) => {
3301                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
3302                                                 if payment_hash != htlc.payment_hash {
3303                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3304                                                 }
3305                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
3306                                         }
3307                                 };
3308                                 match htlc.state {
3309                                         OutboundHTLCState::LocalAnnounced(_) =>
3310                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3311                                         OutboundHTLCState::Committed => {
3312                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3313                                         },
3314                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3315                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3316                                 }
3317                                 return Ok(htlc);
3318                         }
3319                 }
3320                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3321         }
3322
3323         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3324                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3325                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3326                 }
3327                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3328                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3329                 }
3330
3331                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3332         }
3333
3334         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3335                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3336                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3337                 }
3338                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3339                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3340                 }
3341
3342                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3343                 Ok(())
3344         }
3345
3346         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3347                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3348                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3349                 }
3350                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3351                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3352                 }
3353
3354                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3355                 Ok(())
3356         }
3357
3358         pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
3359                 where L::Target: Logger
3360         {
3361                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3362                         return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3363                 }
3364                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3365                         return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3366                 }
3367                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3368                         return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3369                 }
3370
3371                 let funding_script = self.context.get_funding_redeemscript();
3372
3373                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3374
3375                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3376                 let commitment_txid = {
3377                         let trusted_tx = commitment_stats.tx.trust();
3378                         let bitcoin_tx = trusted_tx.built_transaction();
3379                         let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3380
3381                         log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3382                                 log_bytes!(msg.signature.serialize_compact()[..]),
3383                                 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3384                                 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
3385                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3386                                 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3387                         }
3388                         bitcoin_tx.txid
3389                 };
3390                 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3391
3392                 // If our counterparty updated the channel fee in this commitment transaction, check that
3393                 // they can actually afford the new fee now.
3394                 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3395                         update_state == FeeUpdateState::RemoteAnnounced
3396                 } else { false };
3397                 if update_fee {
3398                         debug_assert!(!self.context.is_outbound());
3399                         let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3400                         if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3401                                 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3402                         }
3403                 }
3404                 #[cfg(any(test, fuzzing))]
3405                 {
3406                         if self.context.is_outbound() {
3407                                 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3408                                 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3409                                 if let Some(info) = projected_commit_tx_info {
3410                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3411                                                 + self.context.holding_cell_htlc_updates.len();
3412                                         if info.total_pending_htlcs == total_pending_htlcs
3413                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3414                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3415                                                 && info.feerate == self.context.feerate_per_kw {
3416                                                         assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3417                                                 }
3418                                 }
3419                         }
3420                 }
3421
3422                 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3423                         return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3424                 }
3425
3426                 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3427                 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3428                 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3429                 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3430                 // backwards compatibility, we never use it in production. To provide test coverage, here,
3431                 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3432                 #[allow(unused_assignments, unused_mut)]
3433                 let mut separate_nondust_htlc_sources = false;
3434                 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3435                         use core::hash::{BuildHasher, Hasher};
3436                         // Get a random value using the only std API to do so - the DefaultHasher
3437                         let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3438                         separate_nondust_htlc_sources = rand_val % 2 == 0;
3439                 }
3440
3441                 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3442                 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3443                 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3444                         if let Some(_) = htlc.transaction_output_index {
3445                                 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3446                                         self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
3447                                         false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3448
3449                                 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
3450                                 let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3451                                 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3452                                 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3453                                         log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
3454                                         encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
3455                                 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
3456                                         return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3457                                 }
3458                                 if !separate_nondust_htlc_sources {
3459                                         htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3460                                 }
3461                         } else {
3462                                 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3463                         }
3464                         if separate_nondust_htlc_sources {
3465                                 if let Some(source) = source_opt.take() {
3466                                         nondust_htlc_sources.push(source);
3467                                 }
3468                         }
3469                         debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3470                 }
3471
3472                 let holder_commitment_tx = HolderCommitmentTransaction::new(
3473                         commitment_stats.tx,
3474                         msg.signature,
3475                         msg.htlc_signatures.clone(),
3476                         &self.context.get_holder_pubkeys().funding_pubkey,
3477                         self.context.counterparty_funding_pubkey()
3478                 );
3479
3480                 self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3481                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3482
3483                 // Update state now that we've passed all the can-fail calls...
3484                 let mut need_commitment = false;
3485                 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3486                         if *update_state == FeeUpdateState::RemoteAnnounced {
3487                                 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3488                                 need_commitment = true;
3489                         }
3490                 }
3491
3492                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3493                         let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3494                                 Some(forward_info.clone())
3495                         } else { None };
3496                         if let Some(forward_info) = new_forward {
3497                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3498                                         log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
3499                                 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3500                                 need_commitment = true;
3501                         }
3502                 }
3503                 let mut claimed_htlcs = Vec::new();
3504                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3505                         if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3506                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3507                                         log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
3508                                 // Grab the preimage, if it exists, instead of cloning
3509                                 let mut reason = OutboundHTLCOutcome::Success(None);
3510                                 mem::swap(outcome, &mut reason);
3511                                 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3512                                         // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3513                                         // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3514                                         // have a `Success(None)` reason. In this case we could forget some HTLC
3515                                         // claims, but such an upgrade is unlikely and including claimed HTLCs here
3516                                         // fixes a bug which the user was exposed to on 0.0.104 when they started the
3517                                         // claim anyway.
3518                                         claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3519                                 }
3520                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3521                                 need_commitment = true;
3522                         }
3523                 }
3524
3525                 self.context.latest_monitor_update_id += 1;
3526                 let mut monitor_update = ChannelMonitorUpdate {
3527                         update_id: self.context.latest_monitor_update_id,
3528                         updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3529                                 commitment_tx: holder_commitment_tx,
3530                                 htlc_outputs: htlcs_and_sigs,
3531                                 claimed_htlcs,
3532                                 nondust_htlc_sources,
3533                         }]
3534                 };
3535
3536                 self.context.cur_holder_commitment_transaction_number -= 1;
3537                 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3538                 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3539                 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3540
3541                 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3542                         // In case we initially failed monitor updating without requiring a response, we need
3543                         // to make sure the RAA gets sent first.
3544                         self.context.monitor_pending_revoke_and_ack = true;
3545                         if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3546                                 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3547                                 // the corresponding HTLC status updates so that get_last_commitment_update
3548                                 // includes the right HTLCs.
3549                                 self.context.monitor_pending_commitment_signed = true;
3550                                 let mut additional_update = self.build_commitment_no_status_check(logger);
3551                                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3552                                 // strictly increasing by one, so decrement it here.
3553                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3554                                 monitor_update.updates.append(&mut additional_update.updates);
3555                         }
3556                         log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3557                                 log_bytes!(self.context.channel_id));
3558                         return Ok(self.push_ret_blockable_mon_update(monitor_update));
3559                 }
3560
3561                 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3562                         // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3563                         // we'll send one right away when we get the revoke_and_ack when we
3564                         // free_holding_cell_htlcs().
3565                         let mut additional_update = self.build_commitment_no_status_check(logger);
3566                         // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3567                         // strictly increasing by one, so decrement it here.
3568                         self.context.latest_monitor_update_id = monitor_update.update_id;
3569                         monitor_update.updates.append(&mut additional_update.updates);
3570                         true
3571                 } else { false };
3572
3573                 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3574                         log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3575                 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3576                 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3577         }
3578
3579         /// Public version of the below, checking relevant preconditions first.
3580         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3581         /// returns `(None, Vec::new())`.
3582         pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
3583                 if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
3584                    (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3585                         self.free_holding_cell_htlcs(logger)
3586                 } else { (None, Vec::new()) }
3587         }
3588
3589         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3590         /// for our counterparty.
3591         fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
3592                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3593                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3594                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3595                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
3596
3597                         let mut monitor_update = ChannelMonitorUpdate {
3598                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3599                                 updates: Vec::new(),
3600                         };
3601
3602                         let mut htlc_updates = Vec::new();
3603                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3604                         let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
3605                         let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
3606                         let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
3607                         let mut htlcs_to_fail = Vec::new();
3608                         for htlc_update in htlc_updates.drain(..) {
3609                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
3610                                 // fee races with adding too many outputs which push our total payments just over
3611                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
3612                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3613                                 // to rebalance channels.
3614                                 match &htlc_update {
3615                                         &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
3616                                                 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
3617                                                         Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
3618                                                         Err(e) => {
3619                                                                 match e {
3620                                                                         ChannelError::Ignore(ref msg) => {
3621                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
3622                                                                                         log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
3623                                                                                 // If we fail to send here, then this HTLC should
3624                                                                                 // be failed backwards. Failing to send here
3625                                                                                 // indicates that this HTLC may keep being put back
3626                                                                                 // into the holding cell without ever being
3627                                                                                 // successfully forwarded/failed/fulfilled, causing
3628                                                                                 // our counterparty to eventually close on us.
3629                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3630                                                                         },
3631                                                                         _ => {
3632                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3633                                                                         },
3634                                                                 }
3635                                                         }
3636                                                 }
3637                                         },
3638                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3639                                                 // If an HTLC claim was previously added to the holding cell (via
3640                                                 // `get_update_fulfill_htlc`, then generating the claim message itself must
3641                                                 // not fail - any in between attempts to claim the HTLC will have resulted
3642                                                 // in it hitting the holding cell again and we cannot change the state of a
3643                                                 // holding cell HTLC from fulfill to anything else.
3644                                                 let (update_fulfill_msg_option, mut additional_monitor_update) =
3645                                                         if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
3646                                                                 (msg, monitor_update)
3647                                                         } else { unreachable!() };
3648                                                 update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
3649                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3650                                         },
3651                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3652                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3653                                                         Ok(update_fail_msg_option) => {
3654                                                                 // If an HTLC failure was previously added to the holding cell (via
3655                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3656                                                                 // not fail - we should never end up in a state where we double-fail
3657                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3658                                                                 // for a full revocation before failing.
3659                                                                 update_fail_htlcs.push(update_fail_msg_option.unwrap())
3660                                                         },
3661                                                         Err(e) => {
3662                                                                 if let ChannelError::Ignore(_) = e {}
3663                                                                 else {
3664                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3665                                                                 }
3666                                                         }
3667                                                 }
3668                                         },
3669                                 }
3670                         }
3671                         if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
3672                                 return (None, htlcs_to_fail);
3673                         }
3674                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3675                                 self.send_update_fee(feerate, false, logger)
3676                         } else {
3677                                 None
3678                         };
3679
3680                         let mut additional_update = self.build_commitment_no_status_check(logger);
3681                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3682                         // but we want them to be strictly increasing by one, so reset it here.
3683                         self.context.latest_monitor_update_id = monitor_update.update_id;
3684                         monitor_update.updates.append(&mut additional_update.updates);
3685
3686                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3687                                 log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
3688                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
3689
3690                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3691                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3692                 } else {
3693                         (None, Vec::new())
3694                 }
3695         }
3696
3697         /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3698         /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3699         /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3700         /// generating an appropriate error *after* the channel state has been updated based on the
3701         /// revoke_and_ack message.
3702         pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
3703                 where L::Target: Logger,
3704         {
3705                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3706                         return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3707                 }
3708                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3709                         return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3710                 }
3711                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3712                         return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3713                 }
3714
3715                 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3716
3717                 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3718                         if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3719                                 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3720                         }
3721                 }
3722
3723                 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3724                         // Our counterparty seems to have burned their coins to us (by revoking a state when we
3725                         // haven't given them a new commitment transaction to broadcast). We should probably
3726                         // take advantage of this by updating our channel monitor, sending them an error, and
3727                         // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3728                         // lot of work, and there's some chance this is all a misunderstanding anyway.
3729                         // We have to do *something*, though, since our signer may get mad at us for otherwise
3730                         // jumping a remote commitment number, so best to just force-close and move on.
3731                         return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3732                 }
3733
3734                 #[cfg(any(test, fuzzing))]
3735                 {
3736                         *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3737                         *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3738                 }
3739
3740                 self.context.holder_signer.validate_counterparty_revocation(
3741                         self.context.cur_counterparty_commitment_transaction_number + 1,
3742                         &secret
3743                 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3744
3745                 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3746                         .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3747                 self.context.latest_monitor_update_id += 1;
3748                 let mut monitor_update = ChannelMonitorUpdate {
3749                         update_id: self.context.latest_monitor_update_id,
3750                         updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3751                                 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3752                                 secret: msg.per_commitment_secret,
3753                         }],
3754                 };
3755
3756                 // Update state now that we've passed all the can-fail calls...
3757                 // (note that we may still fail to generate the new commitment_signed message, but that's
3758                 // OK, we step the channel here and *then* if the new generation fails we can fail the
3759                 // channel based on that, but stepping stuff here should be safe either way.
3760                 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3761                 self.context.sent_message_awaiting_response = None;
3762                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3763                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3764                 self.context.cur_counterparty_commitment_transaction_number -= 1;
3765
3766                 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3767                         self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3768                 }
3769
3770                 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
3771                 let mut to_forward_infos = Vec::new();
3772                 let mut revoked_htlcs = Vec::new();
3773                 let mut finalized_claimed_htlcs = Vec::new();
3774                 let mut update_fail_htlcs = Vec::new();
3775                 let mut update_fail_malformed_htlcs = Vec::new();
3776                 let mut require_commitment = false;
3777                 let mut value_to_self_msat_diff: i64 = 0;
3778
3779                 {
3780                         // Take references explicitly so that we can hold multiple references to self.context.
3781                         let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3782                         let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3783
3784                         // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
3785                         pending_inbound_htlcs.retain(|htlc| {
3786                                 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3787                                         log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
3788                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3789                                                 value_to_self_msat_diff += htlc.amount_msat as i64;
3790                                         }
3791                                         false
3792                                 } else { true }
3793                         });
3794                         pending_outbound_htlcs.retain(|htlc| {
3795                                 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3796                                         log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
3797                                         if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3798                                                 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3799                                         } else {
3800                                                 finalized_claimed_htlcs.push(htlc.source.clone());
3801                                                 // They fulfilled, so we sent them money
3802                                                 value_to_self_msat_diff -= htlc.amount_msat as i64;
3803                                         }
3804                                         false
3805                                 } else { true }
3806                         });
3807                         for htlc in pending_inbound_htlcs.iter_mut() {
3808                                 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3809                                         true
3810                                 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3811                                         true
3812                                 } else { false };
3813                                 if swap {
3814                                         let mut state = InboundHTLCState::Committed;
3815                                         mem::swap(&mut state, &mut htlc.state);
3816
3817                                         if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3818                                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
3819                                                 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3820                                                 require_commitment = true;
3821                                         } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3822                                                 match forward_info {
3823                                                         PendingHTLCStatus::Fail(fail_msg) => {
3824                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
3825                                                                 require_commitment = true;
3826                                                                 match fail_msg {
3827                                                                         HTLCFailureMsg::Relay(msg) => {
3828                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3829                                                                                 update_fail_htlcs.push(msg)
3830                                                                         },
3831                                                                         HTLCFailureMsg::Malformed(msg) => {
3832                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3833                                                                                 update_fail_malformed_htlcs.push(msg)
3834                                                                         },
3835                                                                 }
3836                                                         },
3837                                                         PendingHTLCStatus::Forward(forward_info) => {
3838                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
3839                                                                 to_forward_infos.push((forward_info, htlc.htlc_id));
3840                                                                 htlc.state = InboundHTLCState::Committed;
3841                                                         }
3842                                                 }
3843                                         }
3844                                 }
3845                         }
3846                         for htlc in pending_outbound_htlcs.iter_mut() {
3847                                 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3848                                         log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
3849                                         htlc.state = OutboundHTLCState::Committed;
3850                                 }
3851                                 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3852                                         log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
3853                                         // Grab the preimage, if it exists, instead of cloning
3854                                         let mut reason = OutboundHTLCOutcome::Success(None);
3855                                         mem::swap(outcome, &mut reason);
3856                                         htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3857                                         require_commitment = true;
3858                                 }
3859                         }
3860                 }
3861                 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3862
3863                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3864                         match update_state {
3865                                 FeeUpdateState::Outbound => {
3866                                         debug_assert!(self.context.is_outbound());
3867                                         log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3868                                         self.context.feerate_per_kw = feerate;
3869                                         self.context.pending_update_fee = None;
3870                                 },
3871                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3872                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3873                                         debug_assert!(!self.context.is_outbound());
3874                                         log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3875                                         require_commitment = true;
3876                                         self.context.feerate_per_kw = feerate;
3877                                         self.context.pending_update_fee = None;
3878                                 },
3879                         }
3880                 }
3881
3882                 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3883                         // We can't actually generate a new commitment transaction (incl by freeing holding
3884                         // cells) while we can't update the monitor, so we just return what we have.
3885                         if require_commitment {
3886                                 self.context.monitor_pending_commitment_signed = true;
3887                                 // When the monitor updating is restored we'll call get_last_commitment_update(),
3888                                 // which does not update state, but we're definitely now awaiting a remote revoke
3889                                 // before we can step forward any more, so set it here.
3890                                 let mut additional_update = self.build_commitment_no_status_check(logger);
3891                                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3892                                 // strictly increasing by one, so decrement it here.
3893                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3894                                 monitor_update.updates.append(&mut additional_update.updates);
3895                         }
3896                         self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3897                         self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3898                         self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3899                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
3900                         return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
3901                 }
3902
3903                 match self.free_holding_cell_htlcs(logger) {
3904                         (Some(_), htlcs_to_fail) => {
3905                                 let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
3906                                 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3907                                 // strictly increasing by one, so decrement it here.
3908                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3909                                 monitor_update.updates.append(&mut additional_update.updates);
3910
3911                                 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3912                                 Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3913                         },
3914                         (None, htlcs_to_fail) => {
3915                                 if require_commitment {
3916                                         let mut additional_update = self.build_commitment_no_status_check(logger);
3917
3918                                         // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3919                                         // strictly increasing by one, so decrement it here.
3920                                         self.context.latest_monitor_update_id = monitor_update.update_id;
3921                                         monitor_update.updates.append(&mut additional_update.updates);
3922
3923                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
3924                                                 log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
3925                                         self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3926                                         Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3927                                 } else {
3928                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
3929                                         self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3930                                         Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3931                                 }
3932                         }
3933                 }
3934         }
3935
3936         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3937         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3938         /// commitment update.
3939         pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
3940                 let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
3941                 assert!(msg_opt.is_none(), "We forced holding cell?");
3942         }
3943
3944         /// Adds a pending update to this channel. See the doc for send_htlc for
3945         /// further details on the optionness of the return value.
3946         /// If our balance is too low to cover the cost of the next commitment transaction at the
3947         /// new feerate, the update is cancelled.
3948         ///
3949         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3950         /// [`Channel`] if `force_holding_cell` is false.
3951         fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
3952                 if !self.context.is_outbound() {
3953                         panic!("Cannot send fee from inbound channel");
3954                 }
3955                 if !self.context.is_usable() {
3956                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3957                 }
3958                 if !self.context.is_live() {
3959                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3960                 }
3961
3962                 // Before proposing a feerate update, check that we can actually afford the new fee.
3963                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3964                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3965                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3966                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3967                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
3968                 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3969                 if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3970                         //TODO: auto-close after a number of failures?
3971                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3972                         return None;
3973                 }
3974
3975                 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3976                 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3977                 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3978                 if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
3979                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3980                         return None;
3981                 }
3982                 if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
3983                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3984                         return None;
3985                 }
3986
3987                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3988                         force_holding_cell = true;
3989                 }
3990
3991                 if force_holding_cell {
3992                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
3993                         return None;
3994                 }
3995
3996                 debug_assert!(self.context.pending_update_fee.is_none());
3997                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3998
3999                 Some(msgs::UpdateFee {
4000                         channel_id: self.context.channel_id,
4001                         feerate_per_kw,
4002                 })
4003         }
4004
4005         /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4006         /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4007         /// resent.
4008         /// No further message handling calls may be made until a channel_reestablish dance has
4009         /// completed.
4010         pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L)  where L::Target: Logger {
4011                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4012                 if self.context.channel_state < ChannelState::FundingSent as u32 {
4013                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
4014                         return;
4015                 }
4016
4017                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
4018                         // While the below code should be idempotent, it's simpler to just return early, as
4019                         // redundant disconnect events can fire, though they should be rare.
4020                         return;
4021                 }
4022
4023                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4024                         self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4025                 }
4026
4027                 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4028                 // will be retransmitted.
4029                 self.context.last_sent_closing_fee = None;
4030                 self.context.pending_counterparty_closing_signed = None;
4031                 self.context.closing_fee_limits = None;
4032
4033                 let mut inbound_drop_count = 0;
4034                 self.context.pending_inbound_htlcs.retain(|htlc| {
4035                         match htlc.state {
4036                                 InboundHTLCState::RemoteAnnounced(_) => {
4037                                         // They sent us an update_add_htlc but we never got the commitment_signed.
4038                                         // We'll tell them what commitment_signed we're expecting next and they'll drop
4039                                         // this HTLC accordingly
4040                                         inbound_drop_count += 1;
4041                                         false
4042                                 },
4043                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4044                                         // We received a commitment_signed updating this HTLC and (at least hopefully)
4045                                         // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4046                                         // in response to it yet, so don't touch it.
4047                                         true
4048                                 },
4049                                 InboundHTLCState::Committed => true,
4050                                 InboundHTLCState::LocalRemoved(_) => {
4051                                         // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4052                                         // re-transmit if needed) and they may have even sent a revoke_and_ack back
4053                                         // (that we missed). Keep this around for now and if they tell us they missed
4054                                         // the commitment_signed we can re-transmit the update then.
4055                                         true
4056                                 },
4057                         }
4058                 });
4059                 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4060
4061                 if let Some((_, update_state)) = self.context.pending_update_fee {
4062                         if update_state == FeeUpdateState::RemoteAnnounced {
4063                                 debug_assert!(!self.context.is_outbound());
4064                                 self.context.pending_update_fee = None;
4065                         }
4066                 }
4067
4068                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4069                         if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4070                                 // They sent us an update to remove this but haven't yet sent the corresponding
4071                                 // commitment_signed, we need to move it back to Committed and they can re-send
4072                                 // the update upon reconnection.
4073                                 htlc.state = OutboundHTLCState::Committed;
4074                         }
4075                 }
4076
4077                 self.context.sent_message_awaiting_response = None;
4078
4079                 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
4080                 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
4081         }
4082
4083         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4084         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4085         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4086         /// update completes (potentially immediately).
4087         /// The messages which were generated with the monitor update must *not* have been sent to the
4088         /// remote end, and must instead have been dropped. They will be regenerated when
4089         /// [`Self::monitor_updating_restored`] is called.
4090         ///
4091         /// [`ChannelManager`]: super::channelmanager::ChannelManager
4092         /// [`chain::Watch`]: crate::chain::Watch
4093         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4094         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4095                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4096                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4097                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4098         ) {
4099                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4100                 self.context.monitor_pending_commitment_signed |= resend_commitment;
4101                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4102                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4103                 self.context.monitor_pending_failures.append(&mut pending_fails);
4104                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4105                 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
4106         }
4107
4108         /// Indicates that the latest ChannelMonitor update has been committed by the client
4109         /// successfully and we should restore normal operation. Returns messages which should be sent
4110         /// to the remote side.
4111         pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4112                 &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
4113                 user_config: &UserConfig, best_block_height: u32
4114         ) -> MonitorRestoreUpdates
4115         where
4116                 L::Target: Logger,
4117                 NS::Target: NodeSigner
4118         {
4119                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
4120                 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
4121                 let mut found_blocked = false;
4122                 self.context.pending_monitor_updates.retain(|upd| {
4123                         if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
4124                         if upd.blocked { found_blocked = true; }
4125                         upd.blocked
4126                 });
4127
4128                 // If we're past (or at) the FundingSent stage on an outbound channel, try to
4129                 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4130                 // first received the funding_signed.
4131                 let mut funding_broadcastable =
4132                         if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
4133                                 self.context.funding_transaction.take()
4134                         } else { None };
4135                 // That said, if the funding transaction is already confirmed (ie we're active with a
4136                 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4137                 if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
4138                         funding_broadcastable = None;
4139                 }
4140
4141                 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4142                 // (and we assume the user never directly broadcasts the funding transaction and waits for
4143                 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4144                 // * an inbound channel that failed to persist the monitor on funding_created and we got
4145                 //   the funding transaction confirmed before the monitor was persisted, or
4146                 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4147                 let channel_ready = if self.context.monitor_pending_channel_ready {
4148                         assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4149                                 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4150                         self.context.monitor_pending_channel_ready = false;
4151                         let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4152                         Some(msgs::ChannelReady {
4153                                 channel_id: self.context.channel_id(),
4154                                 next_per_commitment_point,
4155                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4156                         })
4157                 } else { None };
4158
4159                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
4160
4161                 let mut accepted_htlcs = Vec::new();
4162                 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4163                 let mut failed_htlcs = Vec::new();
4164                 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4165                 let mut finalized_claimed_htlcs = Vec::new();
4166                 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4167
4168                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
4169                         self.context.monitor_pending_revoke_and_ack = false;
4170                         self.context.monitor_pending_commitment_signed = false;
4171                         return MonitorRestoreUpdates {
4172                                 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4173                                 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4174                         };
4175                 }
4176
4177                 let raa = if self.context.monitor_pending_revoke_and_ack {
4178                         Some(self.get_last_revoke_and_ack())
4179                 } else { None };
4180                 let commitment_update = if self.context.monitor_pending_commitment_signed {
4181                         self.mark_awaiting_response();
4182                         Some(self.get_last_commitment_update(logger))
4183                 } else { None };
4184
4185                 self.context.monitor_pending_revoke_and_ack = false;
4186                 self.context.monitor_pending_commitment_signed = false;
4187                 let order = self.context.resend_order.clone();
4188                 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4189                         log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4190                         if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4191                         match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4192                 MonitorRestoreUpdates {
4193                         raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4194                 }
4195         }
4196
4197         pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4198                 where F::Target: FeeEstimator, L::Target: Logger
4199         {
4200                 if self.context.is_outbound() {
4201                         return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4202                 }
4203                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4204                         return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4205                 }
4206                 Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4207                 let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
4208
4209                 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4210                 self.context.update_time_counter += 1;
4211                 // If the feerate has increased over the previous dust buffer (note that
4212                 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
4213                 // won't be pushed over our dust exposure limit by the feerate increase.
4214                 if feerate_over_dust_buffer {
4215                         let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4216                         let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4217                         let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4218                         let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4219                         if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
4220                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4221                                         msg.feerate_per_kw, holder_tx_dust_exposure)));
4222                         }
4223                         if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
4224                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4225                                         msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4226                         }
4227                 }
4228                 Ok(())
4229         }
4230
4231         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4232                 let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4233                 let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4234                 msgs::RevokeAndACK {
4235                         channel_id: self.context.channel_id,
4236                         per_commitment_secret,
4237                         next_per_commitment_point,
4238                         #[cfg(taproot)]
4239                         next_local_nonce: None,
4240                 }
4241         }
4242
4243         fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
4244                 let mut update_add_htlcs = Vec::new();
4245                 let mut update_fulfill_htlcs = Vec::new();
4246                 let mut update_fail_htlcs = Vec::new();
4247                 let mut update_fail_malformed_htlcs = Vec::new();
4248
4249                 for htlc in self.context.pending_outbound_htlcs.iter() {
4250                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4251                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
4252                                         channel_id: self.context.channel_id(),
4253                                         htlc_id: htlc.htlc_id,
4254                                         amount_msat: htlc.amount_msat,
4255                                         payment_hash: htlc.payment_hash,
4256                                         cltv_expiry: htlc.cltv_expiry,
4257                                         onion_routing_packet: (**onion_packet).clone(),
4258                                 });
4259                         }
4260                 }
4261
4262                 for htlc in self.context.pending_inbound_htlcs.iter() {
4263                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4264                                 match reason {
4265                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4266                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4267                                                         channel_id: self.context.channel_id(),
4268                                                         htlc_id: htlc.htlc_id,
4269                                                         reason: err_packet.clone()
4270                                                 });
4271                                         },
4272                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4273                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4274                                                         channel_id: self.context.channel_id(),
4275                                                         htlc_id: htlc.htlc_id,
4276                                                         sha256_of_onion: sha256_of_onion.clone(),
4277                                                         failure_code: failure_code.clone(),
4278                                                 });
4279                                         },
4280                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4281                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4282                                                         channel_id: self.context.channel_id(),
4283                                                         htlc_id: htlc.htlc_id,
4284                                                         payment_preimage: payment_preimage.clone(),
4285                                                 });
4286                                         },
4287                                 }
4288                         }
4289                 }
4290
4291                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4292                         Some(msgs::UpdateFee {
4293                                 channel_id: self.context.channel_id(),
4294                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4295                         })
4296                 } else { None };
4297
4298                 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4299                                 log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
4300                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4301                 msgs::CommitmentUpdate {
4302                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4303                         commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
4304                 }
4305         }
4306
4307         /// May panic if some calls other than message-handling calls (which will all Err immediately)
4308         /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4309         ///
4310         /// Some links printed in log lines are included here to check them during build (when run with
4311         /// `cargo doc --document-private-items`):
4312         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4313         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4314         pub fn channel_reestablish<L: Deref, NS: Deref>(
4315                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4316                 genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
4317         ) -> Result<ReestablishResponses, ChannelError>
4318         where
4319                 L::Target: Logger,
4320                 NS::Target: NodeSigner
4321         {
4322                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4323                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4324                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
4325                         // just close here instead of trying to recover.
4326                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4327                 }
4328
4329                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4330                         msg.next_local_commitment_number == 0 {
4331                         return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
4332                 }
4333
4334                 if msg.next_remote_commitment_number > 0 {
4335                         let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4336                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4337                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4338                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4339                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4340                         }
4341                         if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4342                                 macro_rules! log_and_panic {
4343                                         ($err_msg: expr) => {
4344                                                 log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
4345                                                 panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
4346                                         }
4347                                 }
4348                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4349                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4350                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4351                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4352                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4353                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4354                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4355                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4356                         }
4357                 }
4358
4359                 // Before we change the state of the channel, we check if the peer is sending a very old
4360                 // commitment transaction number, if yes we send a warning message.
4361                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4362                 if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4363                         return Err(
4364                                 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4365                         );
4366                 }
4367
4368                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4369                 // remaining cases either succeed or ErrorMessage-fail).
4370                 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4371                 self.context.sent_message_awaiting_response = None;
4372
4373                 let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4374                         assert!(self.context.shutdown_scriptpubkey.is_some());
4375                         Some(msgs::Shutdown {
4376                                 channel_id: self.context.channel_id,
4377                                 scriptpubkey: self.get_closing_scriptpubkey(),
4378                         })
4379                 } else { None };
4380
4381                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
4382
4383                 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4384                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4385                         if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4386                                         self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4387                                 if msg.next_remote_commitment_number != 0 {
4388                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4389                                 }
4390                                 // Short circuit the whole handler as there is nothing we can resend them
4391                                 return Ok(ReestablishResponses {
4392                                         channel_ready: None,
4393                                         raa: None, commitment_update: None,
4394                                         order: RAACommitmentOrder::CommitmentFirst,
4395                                         shutdown_msg, announcement_sigs,
4396                                 });
4397                         }
4398
4399                         // We have OurChannelReady set!
4400                         let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4401                         return Ok(ReestablishResponses {
4402                                 channel_ready: Some(msgs::ChannelReady {
4403                                         channel_id: self.context.channel_id(),
4404                                         next_per_commitment_point,
4405                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4406                                 }),
4407                                 raa: None, commitment_update: None,
4408                                 order: RAACommitmentOrder::CommitmentFirst,
4409                                 shutdown_msg, announcement_sigs,
4410                         });
4411                 }
4412
4413                 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4414                         // Remote isn't waiting on any RevokeAndACK from us!
4415                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4416                         None
4417                 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4418                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4419                                 self.context.monitor_pending_revoke_and_ack = true;
4420                                 None
4421                         } else {
4422                                 Some(self.get_last_revoke_and_ack())
4423                         }
4424                 } else {
4425                         return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4426                 };
4427
4428                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4429                 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4430                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4431                 // the corresponding revoke_and_ack back yet.
4432                 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4433                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4434                         self.mark_awaiting_response();
4435                 }
4436                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4437
4438                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4439                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4440                         let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4441                         Some(msgs::ChannelReady {
4442                                 channel_id: self.context.channel_id(),
4443                                 next_per_commitment_point,
4444                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4445                         })
4446                 } else { None };
4447
4448                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4449                         if required_revoke.is_some() {
4450                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
4451                         } else {
4452                                 log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
4453                         }
4454
4455                         Ok(ReestablishResponses {
4456                                 channel_ready, shutdown_msg, announcement_sigs,
4457                                 raa: required_revoke,
4458                                 commitment_update: None,
4459                                 order: self.context.resend_order.clone(),
4460                         })
4461                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4462                         if required_revoke.is_some() {
4463                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
4464                         } else {
4465                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
4466                         }
4467
4468                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4469                                 self.context.monitor_pending_commitment_signed = true;
4470                                 Ok(ReestablishResponses {
4471                                         channel_ready, shutdown_msg, announcement_sigs,
4472                                         commitment_update: None, raa: None,
4473                                         order: self.context.resend_order.clone(),
4474                                 })
4475                         } else {
4476                                 Ok(ReestablishResponses {
4477                                         channel_ready, shutdown_msg, announcement_sigs,
4478                                         raa: required_revoke,
4479                                         commitment_update: Some(self.get_last_commitment_update(logger)),
4480                                         order: self.context.resend_order.clone(),
4481                                 })
4482                         }
4483                 } else {
4484                         Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4485                 }
4486         }
4487
4488         /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4489         /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4490         /// at which point they will be recalculated.
4491         fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4492                 -> (u64, u64)
4493                 where F::Target: FeeEstimator
4494         {
4495                 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4496
4497                 // Propose a range from our current Background feerate to our Normal feerate plus our
4498                 // force_close_avoidance_max_fee_satoshis.
4499                 // If we fail to come to consensus, we'll have to force-close.
4500                 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
4501                 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
4502                 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4503
4504                 // The spec requires that (when the channel does not have anchors) we only send absolute
4505                 // channel fees no greater than the absolute channel fee on the current commitment
4506                 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4507                 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4508                 // some force-closure by old nodes, but we wanted to close the channel anyway.
4509
4510                 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4511                         let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4512                         proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4513                         proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4514                 }
4515
4516                 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4517                 // below our dust limit, causing the output to disappear. We don't bother handling this
4518                 // case, however, as this should only happen if a channel is closed before any (material)
4519                 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4520                 // come to consensus with our counterparty on appropriate fees, however it should be a
4521                 // relatively rare case. We can revisit this later, though note that in order to determine
4522                 // if the funders' output is dust we have to know the absolute fee we're going to use.
4523                 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4524                 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4525                 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4526                                 // We always add force_close_avoidance_max_fee_satoshis to our normal
4527                                 // feerate-calculated fee, but allow the max to be overridden if we're using a
4528                                 // target feerate-calculated fee.
4529                                 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4530                                         proposed_max_feerate as u64 * tx_weight / 1000)
4531                         } else {
4532                                 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4533                         };
4534
4535                 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4536                 self.context.closing_fee_limits.clone().unwrap()
4537         }
4538
4539         /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4540         /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4541         /// this point if we're the funder we should send the initial closing_signed, and in any case
4542         /// shutdown should complete within a reasonable timeframe.
4543         fn closing_negotiation_ready(&self) -> bool {
4544                 self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
4545                         self.context.channel_state &
4546                                 (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
4547                                  ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
4548                                 == BOTH_SIDES_SHUTDOWN_MASK &&
4549                         self.context.pending_update_fee.is_none()
4550         }
4551
4552         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4553         /// an Err if no progress is being made and the channel should be force-closed instead.
4554         /// Should be called on a one-minute timer.
4555         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4556                 if self.closing_negotiation_ready() {
4557                         if self.context.closing_signed_in_flight {
4558                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4559                         } else {
4560                                 self.context.closing_signed_in_flight = true;
4561                         }
4562                 }
4563                 Ok(())
4564         }
4565
4566         pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4567                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4568                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4569                 where F::Target: FeeEstimator, L::Target: Logger
4570         {
4571                 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4572                         return Ok((None, None));
4573                 }
4574
4575                 if !self.context.is_outbound() {
4576                         if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4577                                 return self.closing_signed(fee_estimator, &msg);
4578                         }
4579                         return Ok((None, None));
4580                 }
4581
4582                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4583
4584                 assert!(self.context.shutdown_scriptpubkey.is_some());
4585                 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4586                 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4587                         our_min_fee, our_max_fee, total_fee_satoshis);
4588
4589                 let sig = self.context.holder_signer
4590                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4591                         .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4592
4593                 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4594                 Ok((Some(msgs::ClosingSigned {
4595                         channel_id: self.context.channel_id,
4596                         fee_satoshis: total_fee_satoshis,
4597                         signature: sig,
4598                         fee_range: Some(msgs::ClosingSignedFeeRange {
4599                                 min_fee_satoshis: our_min_fee,
4600                                 max_fee_satoshis: our_max_fee,
4601                         }),
4602                 }), None))
4603         }
4604
4605         // Marks a channel as waiting for a response from the counterparty. If it's not received
4606         // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4607         // a reconnection.
4608         fn mark_awaiting_response(&mut self) {
4609                 self.context.sent_message_awaiting_response = Some(0);
4610         }
4611
4612         /// Determines whether we should disconnect the counterparty due to not receiving a response
4613         /// within our expected timeframe.
4614         ///
4615         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4616         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4617                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4618                         ticks_elapsed
4619                 } else {
4620                         // Don't disconnect when we're not waiting on a response.
4621                         return false;
4622                 };
4623                 *ticks_elapsed += 1;
4624                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4625         }
4626
4627         pub fn shutdown<SP: Deref>(
4628                 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4629         ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4630         where SP::Target: SignerProvider
4631         {
4632                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4633                         return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4634                 }
4635                 if self.context.channel_state < ChannelState::FundingSent as u32 {
4636                         // Spec says we should fail the connection, not the channel, but that's nonsense, there
4637                         // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4638                         // can do that via error message without getting a connection fail anyway...
4639                         return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4640                 }
4641                 for htlc in self.context.pending_inbound_htlcs.iter() {
4642                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4643                                 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4644                         }
4645                 }
4646                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4647
4648                 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4649                         return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
4650                 }
4651
4652                 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4653                         if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4654                                 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
4655                         }
4656                 } else {
4657                         self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4658                 }
4659
4660                 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4661                 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4662                 // any further commitment updates after we set LocalShutdownSent.
4663                 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4664
4665                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4666                         Some(_) => false,
4667                         None => {
4668                                 assert!(send_shutdown);
4669                                 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4670                                         Ok(scriptpubkey) => scriptpubkey,
4671                                         Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4672                                 };
4673                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
4674                                         return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4675                                 }
4676                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4677                                 true
4678                         },
4679                 };
4680
4681                 // From here on out, we may not fail!
4682
4683                 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4684                 self.context.update_time_counter += 1;
4685
4686                 let monitor_update = if update_shutdown_script {
4687                         self.context.latest_monitor_update_id += 1;
4688                         let monitor_update = ChannelMonitorUpdate {
4689                                 update_id: self.context.latest_monitor_update_id,
4690                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4691                                         scriptpubkey: self.get_closing_scriptpubkey(),
4692                                 }],
4693                         };
4694                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4695                         if self.push_blockable_mon_update(monitor_update) {
4696                                 self.context.pending_monitor_updates.last().map(|upd| &upd.update)
4697                         } else { None }
4698                 } else { None };
4699                 let shutdown = if send_shutdown {
4700                         Some(msgs::Shutdown {
4701                                 channel_id: self.context.channel_id,
4702                                 scriptpubkey: self.get_closing_scriptpubkey(),
4703                         })
4704                 } else { None };
4705
4706                 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4707                 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4708                 // cell HTLCs and return them to fail the payment.
4709                 self.context.holding_cell_update_fee = None;
4710                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4711                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4712                         match htlc_update {
4713                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4714                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4715                                         false
4716                                 },
4717                                 _ => true
4718                         }
4719                 });
4720
4721                 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4722                 self.context.update_time_counter += 1;
4723
4724                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4725         }
4726
4727         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4728                 let mut tx = closing_tx.trust().built_transaction().clone();
4729
4730                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4731
4732                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4733                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4734                 let mut holder_sig = sig.serialize_der().to_vec();
4735                 holder_sig.push(EcdsaSighashType::All as u8);
4736                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4737                 cp_sig.push(EcdsaSighashType::All as u8);
4738                 if funding_key[..] < counterparty_funding_key[..] {
4739                         tx.input[0].witness.push(holder_sig);
4740                         tx.input[0].witness.push(cp_sig);
4741                 } else {
4742                         tx.input[0].witness.push(cp_sig);
4743                         tx.input[0].witness.push(holder_sig);
4744                 }
4745
4746                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4747                 tx
4748         }
4749
4750         pub fn closing_signed<F: Deref>(
4751                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4752                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4753                 where F::Target: FeeEstimator
4754         {
4755                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4756                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4757                 }
4758                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4759                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4760                 }
4761                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4762                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4763                 }
4764                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4765                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4766                 }
4767
4768                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4769                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4770                 }
4771
4772                 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4773                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
4774                         return Ok((None, None));
4775                 }
4776
4777                 let funding_redeemscript = self.context.get_funding_redeemscript();
4778                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4779                 if used_total_fee != msg.fee_satoshis {
4780                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4781                 }
4782                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4783
4784                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4785                         Ok(_) => {},
4786                         Err(_e) => {
4787                                 // The remote end may have decided to revoke their output due to inconsistent dust
4788                                 // limits, so check for that case by re-checking the signature here.
4789                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4790                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4791                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4792                         },
4793                 };
4794
4795                 for outp in closing_tx.trust().built_transaction().output.iter() {
4796                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4797                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4798                         }
4799                 }
4800
4801                 assert!(self.context.shutdown_scriptpubkey.is_some());
4802                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4803                         if last_fee == msg.fee_satoshis {
4804                                 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4805                                 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4806                                 self.context.update_time_counter += 1;
4807                                 return Ok((None, Some(tx)));
4808                         }
4809                 }
4810
4811                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4812
4813                 macro_rules! propose_fee {
4814                         ($new_fee: expr) => {
4815                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4816                                         (closing_tx, $new_fee)
4817                                 } else {
4818                                         self.build_closing_transaction($new_fee, false)
4819                                 };
4820
4821                                 let sig = self.context.holder_signer
4822                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4823                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4824
4825                                 let signed_tx = if $new_fee == msg.fee_satoshis {
4826                                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
4827                                         self.context.update_time_counter += 1;
4828                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4829                                         Some(tx)
4830                                 } else { None };
4831
4832                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4833                                 return Ok((Some(msgs::ClosingSigned {
4834                                         channel_id: self.context.channel_id,
4835                                         fee_satoshis: used_fee,
4836                                         signature: sig,
4837                                         fee_range: Some(msgs::ClosingSignedFeeRange {
4838                                                 min_fee_satoshis: our_min_fee,
4839                                                 max_fee_satoshis: our_max_fee,
4840                                         }),
4841                                 }), signed_tx))
4842                         }
4843                 }
4844
4845                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4846                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4847                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4848                         }
4849                         if max_fee_satoshis < our_min_fee {
4850                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4851                         }
4852                         if min_fee_satoshis > our_max_fee {
4853                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4854                         }
4855
4856                         if !self.context.is_outbound() {
4857                                 // They have to pay, so pick the highest fee in the overlapping range.
4858                                 // We should never set an upper bound aside from their full balance
4859                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4860                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4861                         } else {
4862                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4863                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4864                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
4865                                 }
4866                                 // The proposed fee is in our acceptable range, accept it and broadcast!
4867                                 propose_fee!(msg.fee_satoshis);
4868                         }
4869                 } else {
4870                         // Old fee style negotiation. We don't bother to enforce whether they are complying
4871                         // with the "making progress" requirements, we just comply and hope for the best.
4872                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4873                                 if msg.fee_satoshis > last_fee {
4874                                         if msg.fee_satoshis < our_max_fee {
4875                                                 propose_fee!(msg.fee_satoshis);
4876                                         } else if last_fee < our_max_fee {
4877                                                 propose_fee!(our_max_fee);
4878                                         } else {
4879                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4880                                         }
4881                                 } else {
4882                                         if msg.fee_satoshis > our_min_fee {
4883                                                 propose_fee!(msg.fee_satoshis);
4884                                         } else if last_fee > our_min_fee {
4885                                                 propose_fee!(our_min_fee);
4886                                         } else {
4887                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4888                                         }
4889                                 }
4890                         } else {
4891                                 if msg.fee_satoshis < our_min_fee {
4892                                         propose_fee!(our_min_fee);
4893                                 } else if msg.fee_satoshis > our_max_fee {
4894                                         propose_fee!(our_max_fee);
4895                                 } else {
4896                                         propose_fee!(msg.fee_satoshis);
4897                                 }
4898                         }
4899                 }
4900         }
4901
4902         fn internal_htlc_satisfies_config(
4903                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4904         ) -> Result<(), (&'static str, u16)> {
4905                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4906                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4907                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4908                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4909                         return Err((
4910                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4911                                 0x1000 | 12, // fee_insufficient
4912                         ));
4913                 }
4914                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4915                         return Err((
4916                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4917                                 0x1000 | 13, // incorrect_cltv_expiry
4918                         ));
4919                 }
4920                 Ok(())
4921         }
4922
4923         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4924         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4925         /// unsuccessful, falls back to the previous one if one exists.
4926         pub fn htlc_satisfies_config(
4927                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4928         ) -> Result<(), (&'static str, u16)> {
4929                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4930                         .or_else(|err| {
4931                                 if let Some(prev_config) = self.context.prev_config() {
4932                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4933                                 } else {
4934                                         Err(err)
4935                                 }
4936                         })
4937         }
4938
4939         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4940                 self.context.cur_holder_commitment_transaction_number + 1
4941         }
4942
4943         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4944                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4945         }
4946
4947         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4948                 self.context.cur_counterparty_commitment_transaction_number + 2
4949         }
4950
4951         #[cfg(test)]
4952         pub fn get_signer(&self) -> &Signer {
4953                 &self.context.holder_signer
4954         }
4955
4956         #[cfg(test)]
4957         pub fn get_value_stat(&self) -> ChannelValueStat {
4958                 ChannelValueStat {
4959                         value_to_self_msat: self.context.value_to_self_msat,
4960                         channel_value_msat: self.context.channel_value_satoshis * 1000,
4961                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4962                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4963                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4964                         holding_cell_outbound_amount_msat: {
4965                                 let mut res = 0;
4966                                 for h in self.context.holding_cell_htlc_updates.iter() {
4967                                         match h {
4968                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4969                                                         res += amount_msat;
4970                                                 }
4971                                                 _ => {}
4972                                         }
4973                                 }
4974                                 res
4975                         },
4976                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4977                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4978                 }
4979         }
4980
4981         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4982         /// Allowed in any state (including after shutdown)
4983         pub fn is_awaiting_monitor_update(&self) -> bool {
4984                 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4985         }
4986
4987         pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
4988                 if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4989                 self.context.pending_monitor_updates[0].update.update_id - 1
4990         }
4991
4992         /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4993         /// further blocked monitor update exists after the next.
4994         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
4995                 for i in 0..self.context.pending_monitor_updates.len() {
4996                         if self.context.pending_monitor_updates[i].blocked {
4997                                 self.context.pending_monitor_updates[i].blocked = false;
4998                                 return Some((&self.context.pending_monitor_updates[i].update,
4999                                         self.context.pending_monitor_updates.len() > i + 1));
5000                         }
5001                 }
5002                 None
5003         }
5004
5005         /// Pushes a new monitor update into our monitor update queue, returning whether it should be
5006         /// immediately given to the user for persisting or if it should be held as blocked.
5007         fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
5008                 let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
5009                 self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
5010                         update, blocked: !release_monitor
5011                 });
5012                 release_monitor
5013         }
5014
5015         /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
5016         /// it should be immediately given to the user for persisting or `None` if it should be held as
5017         /// blocked.
5018         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5019         -> Option<&ChannelMonitorUpdate> {
5020                 let release_monitor = self.push_blockable_mon_update(update);
5021                 if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
5022         }
5023
5024         pub fn no_monitor_updates_pending(&self) -> bool {
5025                 self.context.pending_monitor_updates.is_empty()
5026         }
5027
5028         pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
5029                 self.context.pending_monitor_updates.retain(|upd| {
5030                         if upd.update.update_id <= update_id {
5031                                 assert!(!upd.blocked, "Completed update must have flown");
5032                                 false
5033                         } else { true }
5034                 });
5035         }
5036
5037         pub fn complete_one_mon_update(&mut self, update_id: u64) {
5038                 self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
5039         }
5040
5041         /// Returns an iterator over all unblocked monitor updates which have not yet completed.
5042         pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
5043                 self.context.pending_monitor_updates.iter()
5044                         .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
5045         }
5046
5047         /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5048         /// If the channel is outbound, this implies we have not yet broadcasted the funding
5049         /// transaction. If the channel is inbound, this implies simply that the channel has not
5050         /// advanced state.
5051         pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5052                 if !self.is_awaiting_monitor_update() { return false; }
5053                 if self.context.channel_state &
5054                         !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
5055                                 == ChannelState::FundingSent as u32 {
5056                         // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5057                         // FundingSent set, though our peer could have sent their channel_ready.
5058                         debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5059                         return true;
5060                 }
5061                 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5062                         self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5063                         // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
5064                         // waiting for the initial monitor persistence. Thus, we check if our commitment
5065                         // transaction numbers have both been iterated only exactly once (for the
5066                         // funding_signed), and we're awaiting monitor update.
5067                         //
5068                         // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5069                         // only way to get an awaiting-monitor-update state during initial funding is if the
5070                         // initial monitor persistence is still pending).
5071                         //
5072                         // Because deciding we're awaiting initial broadcast spuriously could result in
5073                         // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5074                         // we hard-assert here, even in production builds.
5075                         if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5076                         assert!(self.context.monitor_pending_channel_ready);
5077                         assert_eq!(self.context.latest_monitor_update_id, 0);
5078                         return true;
5079                 }
5080                 false
5081         }
5082
5083         /// Returns true if our channel_ready has been sent
5084         pub fn is_our_channel_ready(&self) -> bool {
5085                 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
5086         }
5087
5088         /// Returns true if our peer has either initiated or agreed to shut down the channel.
5089         pub fn received_shutdown(&self) -> bool {
5090                 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
5091         }
5092
5093         /// Returns true if we either initiated or agreed to shut down the channel.
5094         pub fn sent_shutdown(&self) -> bool {
5095                 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
5096         }
5097
5098         /// Returns true if this channel is fully shut down. True here implies that no further actions
5099         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5100         /// will be handled appropriately by the chain monitor.
5101         pub fn is_shutdown(&self) -> bool {
5102                 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
5103                         assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
5104                         true
5105                 } else { false }
5106         }
5107
5108         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5109                 self.context.channel_update_status
5110         }
5111
5112         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5113                 self.context.update_time_counter += 1;
5114                 self.context.channel_update_status = status;
5115         }
5116
5117         fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5118                 // Called:
5119                 //  * always when a new block/transactions are confirmed with the new height
5120                 //  * when funding is signed with a height of 0
5121                 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5122                         return None;
5123                 }
5124
5125                 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5126                 if funding_tx_confirmations <= 0 {
5127                         self.context.funding_tx_confirmation_height = 0;
5128                 }
5129
5130                 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5131                         return None;
5132                 }
5133
5134                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5135                 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5136                         self.context.channel_state |= ChannelState::OurChannelReady as u32;
5137                         true
5138                 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5139                         self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5140                         self.context.update_time_counter += 1;
5141                         true
5142                 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5143                         // We got a reorg but not enough to trigger a force close, just ignore.
5144                         false
5145                 } else {
5146                         if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
5147                                 // We should never see a funding transaction on-chain until we've received
5148                                 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5149                                 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5150                                 // however, may do this and we shouldn't treat it as a bug.
5151                                 #[cfg(not(fuzzing))]
5152                                 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5153                                         Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5154                                         self.context.channel_state);
5155                         }
5156                         // We got a reorg but not enough to trigger a force close, just ignore.
5157                         false
5158                 };
5159
5160                 if need_commitment_update {
5161                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5162                                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5163                                         let next_per_commitment_point =
5164                                                 self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5165                                         return Some(msgs::ChannelReady {
5166                                                 channel_id: self.context.channel_id,
5167                                                 next_per_commitment_point,
5168                                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5169                                         });
5170                                 }
5171                         } else {
5172                                 self.context.monitor_pending_channel_ready = true;
5173                         }
5174                 }
5175                 None
5176         }
5177
5178         /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5179         /// In the first case, we store the confirmation height and calculating the short channel id.
5180         /// In the second, we simply return an Err indicating we need to be force-closed now.
5181         pub fn transactions_confirmed<NS: Deref, L: Deref>(
5182                 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5183                 genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5184         ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5185         where
5186                 NS::Target: NodeSigner,
5187                 L::Target: Logger
5188         {
5189                 if let Some(funding_txo) = self.context.get_funding_txo() {
5190                         for &(index_in_block, tx) in txdata.iter() {
5191                                 // Check if the transaction is the expected funding transaction, and if it is,
5192                                 // check that it pays the right amount to the right script.
5193                                 if self.context.funding_tx_confirmation_height == 0 {
5194                                         if tx.txid() == funding_txo.txid {
5195                                                 let txo_idx = funding_txo.index as usize;
5196                                                 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5197                                                                 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5198                                                         if self.context.is_outbound() {
5199                                                                 // If we generated the funding transaction and it doesn't match what it
5200                                                                 // should, the client is really broken and we should just panic and
5201                                                                 // tell them off. That said, because hash collisions happen with high
5202                                                                 // probability in fuzzing mode, if we're fuzzing we just close the
5203                                                                 // channel and move on.
5204                                                                 #[cfg(not(fuzzing))]
5205                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5206                                                         }
5207                                                         self.context.update_time_counter += 1;
5208                                                         let err_reason = "funding tx had wrong script/value or output index";
5209                                                         return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5210                                                 } else {
5211                                                         if self.context.is_outbound() {
5212                                                                 for input in tx.input.iter() {
5213                                                                         if input.witness.is_empty() {
5214                                                                                 // We generated a malleable funding transaction, implying we've
5215                                                                                 // just exposed ourselves to funds loss to our counterparty.
5216                                                                                 #[cfg(not(fuzzing))]
5217                                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5218                                                                         }
5219                                                                 }
5220                                                         }
5221                                                         self.context.funding_tx_confirmation_height = height;
5222                                                         self.context.funding_tx_confirmed_in = Some(*block_hash);
5223                                                         self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5224                                                                 Ok(scid) => Some(scid),
5225                                                                 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5226                                                         }
5227                                                 }
5228                                         }
5229                                         // If we allow 1-conf funding, we may need to check for channel_ready here and
5230                                         // send it immediately instead of waiting for a best_block_updated call (which
5231                                         // may have already happened for this block).
5232                                         if let Some(channel_ready) = self.check_get_channel_ready(height) {
5233                                                 log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
5234                                                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
5235                                                 return Ok((Some(channel_ready), announcement_sigs));
5236                                         }
5237                                 }
5238                                 for inp in tx.input.iter() {
5239                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5240                                                 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
5241                                                 return Err(ClosureReason::CommitmentTxConfirmed);
5242                                         }
5243                                 }
5244                         }
5245                 }
5246                 Ok((None, None))
5247         }
5248
5249         /// When a new block is connected, we check the height of the block against outbound holding
5250         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5251         /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5252         /// handled by the ChannelMonitor.
5253         ///
5254         /// If we return Err, the channel may have been closed, at which point the standard
5255         /// requirements apply - no calls may be made except those explicitly stated to be allowed
5256         /// post-shutdown.
5257         ///
5258         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5259         /// back.
5260         pub fn best_block_updated<NS: Deref, L: Deref>(
5261                 &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
5262                 node_signer: &NS, user_config: &UserConfig, logger: &L
5263         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5264         where
5265                 NS::Target: NodeSigner,
5266                 L::Target: Logger
5267         {
5268                 self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
5269         }
5270
5271         fn do_best_block_updated<NS: Deref, L: Deref>(
5272                 &mut self, height: u32, highest_header_time: u32,
5273                 genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
5274         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5275         where
5276                 NS::Target: NodeSigner,
5277                 L::Target: Logger
5278         {
5279                 let mut timed_out_htlcs = Vec::new();
5280                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5281                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5282                 // ~now.
5283                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5284                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5285                         match htlc_update {
5286                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5287                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5288                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5289                                                 false
5290                                         } else { true }
5291                                 },
5292                                 _ => true
5293                         }
5294                 });
5295
5296                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5297
5298                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5299                         let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
5300                                 self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
5301                         } else { None };
5302                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
5303                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5304                 }
5305
5306                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5307                 if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
5308                    (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5309                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5310                         if self.context.funding_tx_confirmation_height == 0 {
5311                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5312                                 // zero if it has been reorged out, however in either case, our state flags
5313                                 // indicate we've already sent a channel_ready
5314                                 funding_tx_confirmations = 0;
5315                         }
5316
5317                         // If we've sent channel_ready (or have both sent and received channel_ready), and
5318                         // the funding transaction has become unconfirmed,
5319                         // close the channel and hope we can get the latest state on chain (because presumably
5320                         // the funding transaction is at least still in the mempool of most nodes).
5321                         //
5322                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5323                         // 0-conf channel, but not doing so may lead to the
5324                         // `ChannelManager::short_to_chan_info` map  being inconsistent, so we currently have
5325                         // to.
5326                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5327                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5328                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5329                                 return Err(ClosureReason::ProcessingError { err: err_reason });
5330                         }
5331                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5332                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5333                         log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
5334                         // If funding_tx_confirmed_in is unset, the channel must not be active
5335                         assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
5336                         assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5337                         return Err(ClosureReason::FundingTimedOut);
5338                 }
5339
5340                 let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
5341                         self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
5342                 } else { None };
5343                 Ok((None, timed_out_htlcs, announcement_sigs))
5344         }
5345
5346         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5347         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5348         /// before the channel has reached channel_ready and we can just wait for more blocks.
5349         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5350                 if self.context.funding_tx_confirmation_height != 0 {
5351                         // We handle the funding disconnection by calling best_block_updated with a height one
5352                         // below where our funding was connected, implying a reorg back to conf_height - 1.
5353                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
5354                         // We use the time field to bump the current time we set on channel updates if its
5355                         // larger. If we don't know that time has moved forward, we can just set it to the last
5356                         // time we saw and it will be ignored.
5357                         let best_time = self.context.update_time_counter;
5358                         match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
5359                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5360                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5361                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5362                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5363                                         Ok(())
5364                                 },
5365                                 Err(e) => Err(e)
5366                         }
5367                 } else {
5368                         // We never learned about the funding confirmation anyway, just ignore
5369                         Ok(())
5370                 }
5371         }
5372
5373         // Methods to get unprompted messages to send to the remote end (or where we already returned
5374         // something in the handler for the message that prompted this message):
5375
5376         pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
5377                 if !self.context.is_outbound() {
5378                         panic!("Tried to open a channel for an inbound channel?");
5379                 }
5380                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
5381                         panic!("Cannot generate an open_channel after we've moved forward");
5382                 }
5383
5384                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5385                         panic!("Tried to send an open_channel for a channel that has already advanced");
5386                 }
5387
5388                 let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5389                 let keys = self.context.get_holder_pubkeys();
5390
5391                 msgs::OpenChannel {
5392                         chain_hash,
5393                         temporary_channel_id: self.context.channel_id,
5394                         funding_satoshis: self.context.channel_value_satoshis,
5395                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
5396                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
5397                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
5398                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
5399                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
5400                         feerate_per_kw: self.context.feerate_per_kw as u32,
5401                         to_self_delay: self.context.get_holder_selected_contest_delay(),
5402                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
5403                         funding_pubkey: keys.funding_pubkey,
5404                         revocation_basepoint: keys.revocation_basepoint,
5405                         payment_point: keys.payment_point,
5406                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
5407                         htlc_basepoint: keys.htlc_basepoint,
5408                         first_per_commitment_point,
5409                         channel_flags: if self.context.config.announced_channel {1} else {0},
5410                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
5411                                 Some(script) => script.clone().into_inner(),
5412                                 None => Builder::new().into_script(),
5413                         }),
5414                         channel_type: Some(self.context.channel_type.clone()),
5415                 }
5416         }
5417
5418         pub fn inbound_is_awaiting_accept(&self) -> bool {
5419                 self.context.inbound_awaiting_accept
5420         }
5421
5422         /// Sets this channel to accepting 0conf, must be done before `get_accept_channel`
5423         pub fn set_0conf(&mut self) {
5424                 assert!(self.context.inbound_awaiting_accept);
5425                 self.context.minimum_depth = Some(0);
5426         }
5427
5428         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
5429         /// should be sent back to the counterparty node.
5430         ///
5431         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5432         pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
5433                 if self.context.is_outbound() {
5434                         panic!("Tried to send accept_channel for an outbound channel?");
5435                 }
5436                 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
5437                         panic!("Tried to send accept_channel after channel had moved forward");
5438                 }
5439                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5440                         panic!("Tried to send an accept_channel for a channel that has already advanced");
5441                 }
5442                 if !self.context.inbound_awaiting_accept {
5443                         panic!("The inbound channel has already been accepted");
5444                 }
5445
5446                 self.context.user_id = user_id;
5447                 self.context.inbound_awaiting_accept = false;
5448
5449                 self.generate_accept_channel_message()
5450         }
5451
5452         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
5453         /// inbound channel. If the intention is to accept an inbound channel, use
5454         /// [`Channel::accept_inbound_channel`] instead.
5455         ///
5456         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5457         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
5458                 let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5459                 let keys = self.context.get_holder_pubkeys();
5460
5461                 msgs::AcceptChannel {
5462                         temporary_channel_id: self.context.channel_id,
5463                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
5464                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
5465                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
5466                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
5467                         minimum_depth: self.context.minimum_depth.unwrap(),
5468                         to_self_delay: self.context.get_holder_selected_contest_delay(),
5469                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
5470                         funding_pubkey: keys.funding_pubkey,
5471                         revocation_basepoint: keys.revocation_basepoint,
5472                         payment_point: keys.payment_point,
5473                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
5474                         htlc_basepoint: keys.htlc_basepoint,
5475                         first_per_commitment_point,
5476                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
5477                                 Some(script) => script.clone().into_inner(),
5478                                 None => Builder::new().into_script(),
5479                         }),
5480                         channel_type: Some(self.context.channel_type.clone()),
5481                         #[cfg(taproot)]
5482                         next_local_nonce: None,
5483                 }
5484         }
5485
5486         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
5487         /// inbound channel without accepting it.
5488         ///
5489         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5490         #[cfg(test)]
5491         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
5492                 self.generate_accept_channel_message()
5493         }
5494
5495         /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
5496         fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
5497                 let counterparty_keys = self.context.build_remote_transaction_keys();
5498                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5499                 Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
5500                                 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5501         }
5502
5503         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
5504         /// a funding_created message for the remote peer.
5505         /// Panics if called at some time other than immediately after initial handshake, if called twice,
5506         /// or if called on an inbound channel.
5507         /// Note that channel_id changes during this call!
5508         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
5509         /// If an Err is returned, it is a ChannelError::Close.
5510         pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
5511                 if !self.context.is_outbound() {
5512                         panic!("Tried to create outbound funding_created message on an inbound channel!");
5513                 }
5514                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
5515                         panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
5516                 }
5517                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
5518                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
5519                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5520                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
5521                 }
5522
5523                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
5524                 self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
5525
5526                 let signature = match self.get_outbound_funding_created_signature(logger) {
5527                         Ok(res) => res,
5528                         Err(e) => {
5529                                 log_error!(logger, "Got bad signatures: {:?}!", e);
5530                                 self.context.channel_transaction_parameters.funding_outpoint = None;
5531                                 return Err(e);
5532                         }
5533                 };
5534
5535                 let temporary_channel_id = self.context.channel_id;
5536
5537                 // Now that we're past error-generating stuff, update our local state:
5538
5539                 self.context.channel_state = ChannelState::FundingCreated as u32;
5540                 self.context.channel_id = funding_txo.to_channel_id();
5541                 self.context.funding_transaction = Some(funding_transaction);
5542
5543                 Ok(msgs::FundingCreated {
5544                         temporary_channel_id,
5545                         funding_txid: funding_txo.txid,
5546                         funding_output_index: funding_txo.index,
5547                         signature,
5548                         #[cfg(taproot)]
5549                         partial_signature_with_nonce: None,
5550                         #[cfg(taproot)]
5551                         next_local_nonce: None,
5552                 })
5553         }
5554
5555         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5556         /// announceable and available for use (have exchanged ChannelReady messages in both
5557         /// directions). Should be used for both broadcasted announcements and in response to an
5558         /// AnnouncementSignatures message from the remote peer.
5559         ///
5560         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5561         /// closing).
5562         ///
5563         /// This will only return ChannelError::Ignore upon failure.
5564         fn get_channel_announcement<NS: Deref>(
5565                 &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
5566         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5567                 if !self.context.config.announced_channel {
5568                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5569                 }
5570                 if !self.context.is_usable() {
5571                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5572                 }
5573
5574                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5575                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5576                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5577                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5578
5579                 let msg = msgs::UnsignedChannelAnnouncement {
5580                         features: channelmanager::provided_channel_features(&user_config),
5581                         chain_hash,
5582                         short_channel_id: self.context.get_short_channel_id().unwrap(),
5583                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5584                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5585                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5586                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5587                         excess_data: Vec::new(),
5588                 };
5589
5590                 Ok(msg)
5591         }
5592
5593         fn get_announcement_sigs<NS: Deref, L: Deref>(
5594                 &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
5595                 best_block_height: u32, logger: &L
5596         ) -> Option<msgs::AnnouncementSignatures>
5597         where
5598                 NS::Target: NodeSigner,
5599                 L::Target: Logger
5600         {
5601                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5602                         return None;
5603                 }
5604
5605                 if !self.context.is_usable() {
5606                         return None;
5607                 }
5608
5609                 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5610                         log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5611                         return None;
5612                 }
5613
5614                 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5615                         return None;
5616                 }
5617
5618                 log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
5619                 let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
5620                         Ok(a) => a,
5621                         Err(e) => {
5622                                 log_trace!(logger, "{:?}", e);
5623                                 return None;
5624                         }
5625                 };
5626                 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5627                         Err(_) => {
5628                                 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5629                                 return None;
5630                         },
5631                         Ok(v) => v
5632                 };
5633                 let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5634                         Err(_) => {
5635                                 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5636                                 return None;
5637                         },
5638                         Ok(v) => v
5639                 };
5640                 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5641
5642                 Some(msgs::AnnouncementSignatures {
5643                         channel_id: self.context.channel_id(),
5644                         short_channel_id: self.context.get_short_channel_id().unwrap(),
5645                         node_signature: our_node_sig,
5646                         bitcoin_signature: our_bitcoin_sig,
5647                 })
5648         }
5649
5650         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5651         /// available.
5652         fn sign_channel_announcement<NS: Deref>(
5653                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5654         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5655                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5656                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5657                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5658                         let were_node_one = announcement.node_id_1 == our_node_key;
5659
5660                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5661                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5662                         let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5663                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5664                         Ok(msgs::ChannelAnnouncement {
5665                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5666                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5667                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5668                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5669                                 contents: announcement,
5670                         })
5671                 } else {
5672                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5673                 }
5674         }
5675
5676         /// Processes an incoming announcement_signatures message, providing a fully-signed
5677         /// channel_announcement message which we can broadcast and storing our counterparty's
5678         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5679         pub fn announcement_signatures<NS: Deref>(
5680                 &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
5681                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5682         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5683                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5684
5685                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5686
5687                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5688                         return Err(ChannelError::Close(format!(
5689                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5690                                  &announcement, self.context.get_counterparty_node_id())));
5691                 }
5692                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5693                         return Err(ChannelError::Close(format!(
5694                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5695                                 &announcement, self.context.counterparty_funding_pubkey())));
5696                 }
5697
5698                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5699                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5700                         return Err(ChannelError::Ignore(
5701                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5702                 }
5703
5704                 self.sign_channel_announcement(node_signer, announcement)
5705         }
5706
5707         /// Gets a signed channel_announcement for this channel, if we previously received an
5708         /// announcement_signatures from our counterparty.
5709         pub fn get_signed_channel_announcement<NS: Deref>(
5710                 &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
5711         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5712                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5713                         return None;
5714                 }
5715                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5716                         Ok(res) => res,
5717                         Err(_) => return None,
5718                 };
5719                 match self.sign_channel_announcement(node_signer, announcement) {
5720                         Ok(res) => Some(res),
5721                         Err(_) => None,
5722                 }
5723         }
5724
5725         /// May panic if called on a channel that wasn't immediately-previously
5726         /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5727         pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5728                 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5729                 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5730                 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5731                 // current to_remote balances. However, it no longer has any use, and thus is now simply
5732                 // set to a dummy (but valid, as required by the spec) public key.
5733                 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5734                 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5735                 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5736                 let mut pk = [2; 33]; pk[1] = 0xff;
5737                 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5738                 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5739                         let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5740                         log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
5741                         remote_last_secret
5742                 } else {
5743                         log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
5744                         [0;32]
5745                 };
5746                 self.mark_awaiting_response();
5747                 msgs::ChannelReestablish {
5748                         channel_id: self.context.channel_id(),
5749                         // The protocol has two different commitment number concepts - the "commitment
5750                         // transaction number", which starts from 0 and counts up, and the "revocation key
5751                         // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5752                         // commitment transaction numbers by the index which will be used to reveal the
5753                         // revocation key for that commitment transaction, which means we have to convert them
5754                         // to protocol-level commitment numbers here...
5755
5756                         // next_local_commitment_number is the next commitment_signed number we expect to
5757                         // receive (indicating if they need to resend one that we missed).
5758                         next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5759                         // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5760                         // receive, however we track it by the next commitment number for a remote transaction
5761                         // (which is one further, as they always revoke previous commitment transaction, not
5762                         // the one we send) so we have to decrement by 1. Note that if
5763                         // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5764                         // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5765                         // overflow here.
5766                         next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5767                         your_last_per_commitment_secret: remote_last_secret,
5768                         my_current_per_commitment_point: dummy_pubkey,
5769                         // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
5770                         // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5771                         // txid of that interactive transaction, else we MUST NOT set it.
5772                         next_funding_txid: None,
5773                 }
5774         }
5775
5776
5777         // Send stuff to our remote peers:
5778
5779         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5780         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5781         /// commitment update.
5782         ///
5783         /// `Err`s will only be [`ChannelError::Ignore`].
5784         pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5785                 onion_routing_packet: msgs::OnionPacket, logger: &L)
5786         -> Result<(), ChannelError> where L::Target: Logger {
5787                 self
5788                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
5789                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5790                         .map_err(|err| {
5791                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5792                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5793                                 err
5794                         })
5795         }
5796
5797         /// Adds a pending outbound HTLC to this channel, note that you probably want
5798         /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5799         ///
5800         /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5801         /// the wire:
5802         /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5803         ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5804         ///   awaiting ACK.
5805         /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5806         ///   we may not yet have sent the previous commitment update messages and will need to
5807         ///   regenerate them.
5808         ///
5809         /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5810         /// on this [`Channel`] if `force_holding_cell` is false.
5811         ///
5812         /// `Err`s will only be [`ChannelError::Ignore`].
5813         fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5814                 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
5815         -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
5816                 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5817                         return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5818                 }
5819                 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5820                 if amount_msat > channel_total_msat {
5821                         return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5822                 }
5823
5824                 if amount_msat == 0 {
5825                         return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5826                 }
5827
5828                 let available_balances = self.context.get_available_balances();
5829                 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5830                         return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5831                                 available_balances.next_outbound_htlc_minimum_msat)));
5832                 }
5833
5834                 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5835                         return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5836                                 available_balances.next_outbound_htlc_limit_msat)));
5837                 }
5838
5839                 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5840                         // Note that this should never really happen, if we're !is_live() on receipt of an
5841                         // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5842                         // the user to send directly into a !is_live() channel. However, if we
5843                         // disconnected during the time the previous hop was doing the commitment dance we may
5844                         // end up getting here after the forwarding delay. In any case, returning an
5845                         // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5846                         return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5847                 }
5848
5849                 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5850                 log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
5851                         if force_holding_cell { "into holding cell" }
5852                         else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5853                         else { "to peer" });
5854
5855                 if need_holding_cell {
5856                         force_holding_cell = true;
5857                 }
5858
5859                 // Now update local state:
5860                 if force_holding_cell {
5861                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5862                                 amount_msat,
5863                                 payment_hash,
5864                                 cltv_expiry,
5865                                 source,
5866                                 onion_routing_packet,
5867                         });
5868                         return Ok(None);
5869                 }
5870
5871                 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5872                         htlc_id: self.context.next_holder_htlc_id,
5873                         amount_msat,
5874                         payment_hash: payment_hash.clone(),
5875                         cltv_expiry,
5876                         state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5877                         source,
5878                 });
5879
5880                 let res = msgs::UpdateAddHTLC {
5881                         channel_id: self.context.channel_id,
5882                         htlc_id: self.context.next_holder_htlc_id,
5883                         amount_msat,
5884                         payment_hash,
5885                         cltv_expiry,
5886                         onion_routing_packet,
5887                 };
5888                 self.context.next_holder_htlc_id += 1;
5889
5890                 Ok(Some(res))
5891         }
5892
5893         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5894                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5895                 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5896                 // fail to generate this, we still are at least at a position where upgrading their status
5897                 // is acceptable.
5898                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5899                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5900                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5901                         } else { None };
5902                         if let Some(state) = new_state {
5903                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
5904                                 htlc.state = state;
5905                         }
5906                 }
5907                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5908                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5909                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
5910                                 // Grab the preimage, if it exists, instead of cloning
5911                                 let mut reason = OutboundHTLCOutcome::Success(None);
5912                                 mem::swap(outcome, &mut reason);
5913                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5914                         }
5915                 }
5916                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5917                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5918                                 debug_assert!(!self.context.is_outbound());
5919                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5920                                 self.context.feerate_per_kw = feerate;
5921                                 self.context.pending_update_fee = None;
5922                         }
5923                 }
5924                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5925
5926                 let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
5927                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5928                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5929
5930                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5931                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5932                 }
5933
5934                 self.context.latest_monitor_update_id += 1;
5935                 let monitor_update = ChannelMonitorUpdate {
5936                         update_id: self.context.latest_monitor_update_id,
5937                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5938                                 commitment_txid: counterparty_commitment_txid,
5939                                 htlc_outputs: htlcs.clone(),
5940                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5941                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
5942                         }]
5943                 };
5944                 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5945                 monitor_update
5946         }
5947
5948         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
5949                 let counterparty_keys = self.context.build_remote_transaction_keys();
5950                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5951                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5952
5953                 #[cfg(any(test, fuzzing))]
5954                 {
5955                         if !self.context.is_outbound() {
5956                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5957                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5958                                 if let Some(info) = projected_commit_tx_info {
5959                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5960                                         if info.total_pending_htlcs == total_pending_htlcs
5961                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5962                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5963                                                 && info.feerate == self.context.feerate_per_kw {
5964                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
5965                                                         assert_eq!(actual_fee, info.fee);
5966                                                 }
5967                                 }
5968                         }
5969                 }
5970
5971                 (counterparty_commitment_txid, commitment_stats.htlcs_included)
5972         }
5973
5974         /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5975         /// generation when we shouldn't change HTLC/channel state.
5976         fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5977                 // Get the fee tests from `build_commitment_no_state_update`
5978                 #[cfg(any(test, fuzzing))]
5979                 self.build_commitment_no_state_update(logger);
5980
5981                 let counterparty_keys = self.context.build_remote_transaction_keys();
5982                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5983                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5984                 let (signature, htlc_signatures);
5985
5986                 {
5987                         let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5988                         for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5989                                 htlcs.push(htlc);
5990                         }
5991
5992                         let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5993                                 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
5994                         signature = res.0;
5995                         htlc_signatures = res.1;
5996
5997                         log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5998                                 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5999                                 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6000                                 log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
6001
6002                         for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6003                                 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6004                                         encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6005                                         encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
6006                                         log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
6007                                         log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
6008                         }
6009                 }
6010
6011                 Ok((msgs::CommitmentSigned {
6012                         channel_id: self.context.channel_id,
6013                         signature,
6014                         htlc_signatures,
6015                         #[cfg(taproot)]
6016                         partial_signature_with_nonce: None,
6017                 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6018         }
6019
6020         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6021         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6022         ///
6023         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6024         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6025         pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
6026                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
6027                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6028                 match send_res? {
6029                         Some(_) => {
6030                                 let monitor_update = self.build_commitment_no_status_check(logger);
6031                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6032                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
6033                         },
6034                         None => Ok(None)
6035                 }
6036         }
6037
6038         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
6039                 if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
6040                         return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
6041                 }
6042                 self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
6043                         fee_base_msat: msg.contents.fee_base_msat,
6044                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6045                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
6046                 });
6047
6048                 Ok(())
6049         }
6050
6051         /// Begins the shutdown process, getting a message for the remote peer and returning all
6052         /// holding cell HTLCs for payment failure.
6053         ///
6054         /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
6055         /// [`ChannelMonitorUpdate`] will be returned).
6056         pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6057                 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6058         -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6059         where SP::Target: SignerProvider {
6060                 for htlc in self.context.pending_outbound_htlcs.iter() {
6061                         if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6062                                 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6063                         }
6064                 }
6065                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
6066                         if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
6067                                 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6068                         }
6069                         else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
6070                                 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6071                         }
6072                 }
6073                 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6074                         return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6075                 }
6076                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
6077                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
6078                         return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6079                 }
6080
6081                 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
6082                 // script is set, we just force-close and call it a day.
6083                 let mut chan_closed = false;
6084                 if self.context.channel_state < ChannelState::FundingSent as u32 {
6085                         chan_closed = true;
6086                 }
6087
6088                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6089                         Some(_) => false,
6090                         None if !chan_closed => {
6091                                 // use override shutdown script if provided
6092                                 let shutdown_scriptpubkey = match override_shutdown_script {
6093                                         Some(script) => script,
6094                                         None => {
6095                                                 // otherwise, use the shutdown scriptpubkey provided by the signer
6096                                                 match signer_provider.get_shutdown_scriptpubkey() {
6097                                                         Ok(scriptpubkey) => scriptpubkey,
6098                                                         Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6099                                                 }
6100                                         },
6101                                 };
6102                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
6103                                         return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6104                                 }
6105                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6106                                 true
6107                         },
6108                         None => false,
6109                 };
6110
6111                 // From here on out, we may not fail!
6112                 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6113                 if self.context.channel_state < ChannelState::FundingSent as u32 {
6114                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
6115                 } else {
6116                         self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
6117                 }
6118                 self.context.update_time_counter += 1;
6119
6120                 let monitor_update = if update_shutdown_script {
6121                         self.context.latest_monitor_update_id += 1;
6122                         let monitor_update = ChannelMonitorUpdate {
6123                                 update_id: self.context.latest_monitor_update_id,
6124                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6125                                         scriptpubkey: self.get_closing_scriptpubkey(),
6126                                 }],
6127                         };
6128                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6129                         if self.push_blockable_mon_update(monitor_update) {
6130                                 self.context.pending_monitor_updates.last().map(|upd| &upd.update)
6131                         } else { None }
6132                 } else { None };
6133                 let shutdown = msgs::Shutdown {
6134                         channel_id: self.context.channel_id,
6135                         scriptpubkey: self.get_closing_scriptpubkey(),
6136                 };
6137
6138                 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6139                 // our shutdown until we've committed all of the pending changes.
6140                 self.context.holding_cell_update_fee = None;
6141                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6142                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6143                         match htlc_update {
6144                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6145                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6146                                         false
6147                                 },
6148                                 _ => true
6149                         }
6150                 });
6151
6152                 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6153                         "we can't both complete shutdown and return a monitor update");
6154
6155                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6156         }
6157
6158         /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
6159         /// shutdown of this channel - no more calls into this Channel may be made afterwards except
6160         /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
6161         /// Also returns the list of payment_hashes for channels which we can safely fail backwards
6162         /// immediately (others we will have to allow to time out).
6163         pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
6164                 // Note that we MUST only generate a monitor update that indicates force-closure - we're
6165                 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
6166                 // being fully configured in some cases. Thus, its likely any monitor events we generate will
6167                 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
6168                 assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
6169
6170                 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
6171                 // return them to fail the payment.
6172                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6173                 let counterparty_node_id = self.context.get_counterparty_node_id();
6174                 for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
6175                         match htlc_update {
6176                                 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
6177                                         dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
6178                                 },
6179                                 _ => {}
6180                         }
6181                 }
6182                 let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
6183                         // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
6184                         // returning a channel monitor update here would imply a channel monitor update before
6185                         // we even registered the channel monitor to begin with, which is invalid.
6186                         // Thus, if we aren't actually at a point where we could conceivably broadcast the
6187                         // funding transaction, don't return a funding txo (which prevents providing the
6188                         // monitor update to the user, even if we return one).
6189                         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
6190                         if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
6191                                 self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
6192                                 Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
6193                                         update_id: self.context.latest_monitor_update_id,
6194                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
6195                                 }))
6196                         } else { None }
6197                 } else { None };
6198
6199                 self.context.channel_state = ChannelState::ShutdownComplete as u32;
6200                 self.context.update_time_counter += 1;
6201                 (monitor_update, dropped_outbound_htlcs)
6202         }
6203
6204         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6205                 self.context.holding_cell_htlc_updates.iter()
6206                         .flat_map(|htlc_update| {
6207                                 match htlc_update {
6208                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6209                                                 => Some((source, payment_hash)),
6210                                         _ => None,
6211                                 }
6212                         })
6213                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6214         }
6215 }
6216
6217 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6218 pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
6219         pub context: ChannelContext<Signer>,
6220 }
6221
6222 impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
6223         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6224                 // The default channel type (ie the first one we try) depends on whether the channel is
6225                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6226                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6227                 // with no other changes, and fall back to `only_static_remotekey`.
6228                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6229                 if !config.channel_handshake_config.announced_channel &&
6230                         config.channel_handshake_config.negotiate_scid_privacy &&
6231                         their_features.supports_scid_privacy() {
6232                         ret.set_scid_privacy_required();
6233                 }
6234
6235                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6236                 // set it now. If they don't understand it, we'll fall back to our default of
6237                 // `only_static_remotekey`.
6238                 #[cfg(anchors)]
6239                 { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
6240                         if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6241                                 their_features.supports_anchors_zero_fee_htlc_tx() {
6242                                 ret.set_anchors_zero_fee_htlc_tx_required();
6243                         }
6244                 }
6245
6246                 ret
6247         }
6248
6249         pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
6250                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6251                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6252                 outbound_scid_alias: u64
6253         ) -> Result<Channel<Signer>, APIError>
6254         where ES::Target: EntropySource,
6255               SP::Target: SignerProvider<Signer = Signer>,
6256               F::Target: FeeEstimator,
6257         {
6258                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6259                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6260                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6261                 let pubkeys = holder_signer.pubkeys().clone();
6262
6263                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6264                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6265                 }
6266                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6267                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6268                 }
6269                 let channel_value_msat = channel_value_satoshis * 1000;
6270                 if push_msat > channel_value_msat {
6271                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6272                 }
6273                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6274                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
6275                 }
6276                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6277                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6278                         // Protocol level safety check in place, although it should never happen because
6279                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6280                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6281                 }
6282
6283                 let channel_type = Self::get_initial_channel_type(&config, their_features);
6284                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6285
6286                 let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
6287
6288                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6289                 let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
6290                 if value_to_self_msat < commitment_tx_fee {
6291                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6292                 }
6293
6294                 let mut secp_ctx = Secp256k1::new();
6295                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6296
6297                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6298                         match signer_provider.get_shutdown_scriptpubkey() {
6299                                 Ok(scriptpubkey) => Some(scriptpubkey),
6300                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6301                         }
6302                 } else { None };
6303
6304                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6305                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6306                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6307                         }
6308                 }
6309
6310                 let destination_script = match signer_provider.get_destination_script() {
6311                         Ok(script) => script,
6312                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6313                 };
6314
6315                 let temporary_channel_id = entropy_source.get_secure_random_bytes();
6316
6317                 Ok(Channel {
6318                         context: ChannelContext {
6319                                 user_id,
6320
6321                                 config: LegacyChannelConfig {
6322                                         options: config.channel_config.clone(),
6323                                         announced_channel: config.channel_handshake_config.announced_channel,
6324                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6325                                 },
6326
6327                                 prev_config: None,
6328
6329                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6330
6331                                 channel_id: temporary_channel_id,
6332                                 temporary_channel_id: Some(temporary_channel_id),
6333                                 channel_state: ChannelState::OurInitSent as u32,
6334                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6335                                 secp_ctx,
6336                                 channel_value_satoshis,
6337
6338                                 latest_monitor_update_id: 0,
6339
6340                                 holder_signer,
6341                                 shutdown_scriptpubkey,
6342                                 destination_script,
6343
6344                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6345                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6346                                 value_to_self_msat,
6347
6348                                 pending_inbound_htlcs: Vec::new(),
6349                                 pending_outbound_htlcs: Vec::new(),
6350                                 holding_cell_htlc_updates: Vec::new(),
6351                                 pending_update_fee: None,
6352                                 holding_cell_update_fee: None,
6353                                 next_holder_htlc_id: 0,
6354                                 next_counterparty_htlc_id: 0,
6355                                 update_time_counter: 1,
6356
6357                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6358
6359                                 monitor_pending_channel_ready: false,
6360                                 monitor_pending_revoke_and_ack: false,
6361                                 monitor_pending_commitment_signed: false,
6362                                 monitor_pending_forwards: Vec::new(),
6363                                 monitor_pending_failures: Vec::new(),
6364                                 monitor_pending_finalized_fulfills: Vec::new(),
6365
6366                                 #[cfg(debug_assertions)]
6367                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6368                                 #[cfg(debug_assertions)]
6369                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6370
6371                                 last_sent_closing_fee: None,
6372                                 pending_counterparty_closing_signed: None,
6373                                 closing_fee_limits: None,
6374                                 target_closing_feerate_sats_per_kw: None,
6375
6376                                 inbound_awaiting_accept: false,
6377
6378                                 funding_tx_confirmed_in: None,
6379                                 funding_tx_confirmation_height: 0,
6380                                 short_channel_id: None,
6381                                 channel_creation_height: current_chain_height,
6382
6383                                 feerate_per_kw: feerate,
6384                                 counterparty_dust_limit_satoshis: 0,
6385                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6386                                 counterparty_max_htlc_value_in_flight_msat: 0,
6387                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6388                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6389                                 holder_selected_channel_reserve_satoshis,
6390                                 counterparty_htlc_minimum_msat: 0,
6391                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6392                                 counterparty_max_accepted_htlcs: 0,
6393                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6394                                 minimum_depth: None, // Filled in in accept_channel
6395
6396                                 counterparty_forwarding_info: None,
6397
6398                                 channel_transaction_parameters: ChannelTransactionParameters {
6399                                         holder_pubkeys: pubkeys,
6400                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6401                                         is_outbound_from_holder: true,
6402                                         counterparty_parameters: None,
6403                                         funding_outpoint: None,
6404                                         opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
6405                                         opt_non_zero_fee_anchors: None
6406                                 },
6407                                 funding_transaction: None,
6408
6409                                 counterparty_cur_commitment_point: None,
6410                                 counterparty_prev_commitment_point: None,
6411                                 counterparty_node_id,
6412
6413                                 counterparty_shutdown_scriptpubkey: None,
6414
6415                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6416
6417                                 channel_update_status: ChannelUpdateStatus::Enabled,
6418                                 closing_signed_in_flight: false,
6419
6420                                 announcement_sigs: None,
6421
6422                                 #[cfg(any(test, fuzzing))]
6423                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6424                                 #[cfg(any(test, fuzzing))]
6425                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6426
6427                                 workaround_lnd_bug_4006: None,
6428                                 sent_message_awaiting_response: None,
6429
6430                                 latest_inbound_scid_alias: None,
6431                                 outbound_scid_alias,
6432
6433                                 channel_pending_event_emitted: false,
6434                                 channel_ready_event_emitted: false,
6435
6436                                 #[cfg(any(test, fuzzing))]
6437                                 historical_inbound_htlc_fulfills: HashSet::new(),
6438
6439                                 channel_type,
6440                                 channel_keys_id,
6441
6442                                 pending_monitor_updates: Vec::new(),
6443                         }
6444                 })
6445         }
6446 }
6447
6448 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6449 pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
6450         pub context: ChannelContext<Signer>,
6451 }
6452
6453 impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {}
6454
6455 const SERIALIZATION_VERSION: u8 = 3;
6456 const MIN_SERIALIZATION_VERSION: u8 = 2;
6457
6458 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6459         (0, FailRelay),
6460         (1, FailMalformed),
6461         (2, Fulfill),
6462 );
6463
6464 impl Writeable for ChannelUpdateStatus {
6465         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6466                 // We only care about writing out the current state as it was announced, ie only either
6467                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6468                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6469                 match self {
6470                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6471                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6472                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6473                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6474                 }
6475                 Ok(())
6476         }
6477 }
6478
6479 impl Readable for ChannelUpdateStatus {
6480         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6481                 Ok(match <u8 as Readable>::read(reader)? {
6482                         0 => ChannelUpdateStatus::Enabled,
6483                         1 => ChannelUpdateStatus::Disabled,
6484                         _ => return Err(DecodeError::InvalidValue),
6485                 })
6486         }
6487 }
6488
6489 impl Writeable for AnnouncementSigsState {
6490         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6491                 // We only care about writing out the current state as if we had just disconnected, at
6492                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6493                 match self {
6494                         AnnouncementSigsState::NotSent => 0u8.write(writer),
6495                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
6496                         AnnouncementSigsState::Committed => 0u8.write(writer),
6497                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6498                 }
6499         }
6500 }
6501
6502 impl Readable for AnnouncementSigsState {
6503         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6504                 Ok(match <u8 as Readable>::read(reader)? {
6505                         0 => AnnouncementSigsState::NotSent,
6506                         1 => AnnouncementSigsState::PeerReceived,
6507                         _ => return Err(DecodeError::InvalidValue),
6508                 })
6509         }
6510 }
6511
6512 impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
6513         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6514                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6515                 // called.
6516
6517                 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6518
6519                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6520                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6521                 // the low bytes now and the optional high bytes later.
6522                 let user_id_low = self.context.user_id as u64;
6523                 user_id_low.write(writer)?;
6524
6525                 // Version 1 deserializers expected to read parts of the config object here. Version 2
6526                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6527                 // `minimum_depth` we simply write dummy values here.
6528                 writer.write_all(&[0; 8])?;
6529
6530                 self.context.channel_id.write(writer)?;
6531                 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6532                 self.context.channel_value_satoshis.write(writer)?;
6533
6534                 self.context.latest_monitor_update_id.write(writer)?;
6535
6536                 let mut key_data = VecWriter(Vec::new());
6537                 self.context.holder_signer.write(&mut key_data)?;
6538                 assert!(key_data.0.len() < core::usize::MAX);
6539                 assert!(key_data.0.len() < core::u32::MAX as usize);
6540                 (key_data.0.len() as u32).write(writer)?;
6541                 writer.write_all(&key_data.0[..])?;
6542
6543                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6544                 // deserialized from that format.
6545                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6546                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6547                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6548                 }
6549                 self.context.destination_script.write(writer)?;
6550
6551                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
6552                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
6553                 self.context.value_to_self_msat.write(writer)?;
6554
6555                 let mut dropped_inbound_htlcs = 0;
6556                 for htlc in self.context.pending_inbound_htlcs.iter() {
6557                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
6558                                 dropped_inbound_htlcs += 1;
6559                         }
6560                 }
6561                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
6562                 for htlc in self.context.pending_inbound_htlcs.iter() {
6563                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
6564                                 continue; // Drop
6565                         }
6566                         htlc.htlc_id.write(writer)?;
6567                         htlc.amount_msat.write(writer)?;
6568                         htlc.cltv_expiry.write(writer)?;
6569                         htlc.payment_hash.write(writer)?;
6570                         match &htlc.state {
6571                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
6572                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
6573                                         1u8.write(writer)?;
6574                                         htlc_state.write(writer)?;
6575                                 },
6576                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
6577                                         2u8.write(writer)?;
6578                                         htlc_state.write(writer)?;
6579                                 },
6580                                 &InboundHTLCState::Committed => {
6581                                         3u8.write(writer)?;
6582                                 },
6583                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
6584                                         4u8.write(writer)?;
6585                                         removal_reason.write(writer)?;
6586                                 },
6587                         }
6588                 }
6589
6590                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
6591
6592                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
6593                 for htlc in self.context.pending_outbound_htlcs.iter() {
6594                         htlc.htlc_id.write(writer)?;
6595                         htlc.amount_msat.write(writer)?;
6596                         htlc.cltv_expiry.write(writer)?;
6597                         htlc.payment_hash.write(writer)?;
6598                         htlc.source.write(writer)?;
6599                         match &htlc.state {
6600                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
6601                                         0u8.write(writer)?;
6602                                         onion_packet.write(writer)?;
6603                                 },
6604                                 &OutboundHTLCState::Committed => {
6605                                         1u8.write(writer)?;
6606                                 },
6607                                 &OutboundHTLCState::RemoteRemoved(_) => {
6608                                         // Treat this as a Committed because we haven't received the CS - they'll
6609                                         // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
6610                                         1u8.write(writer)?;
6611                                 },
6612                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
6613                                         3u8.write(writer)?;
6614                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
6615                                                 preimages.push(preimage);
6616                                         }
6617                                         let reason: Option<&HTLCFailReason> = outcome.into();
6618                                         reason.write(writer)?;
6619                                 }
6620                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
6621                                         4u8.write(writer)?;
6622                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
6623                                                 preimages.push(preimage);
6624                                         }
6625                                         let reason: Option<&HTLCFailReason> = outcome.into();
6626                                         reason.write(writer)?;
6627                                 }
6628                         }
6629                 }
6630
6631                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
6632                 for update in self.context.holding_cell_htlc_updates.iter() {
6633                         match update {
6634                                 &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
6635                                         0u8.write(writer)?;
6636                                         amount_msat.write(writer)?;
6637                                         cltv_expiry.write(writer)?;
6638                                         payment_hash.write(writer)?;
6639                                         source.write(writer)?;
6640                                         onion_routing_packet.write(writer)?;
6641                                 },
6642                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
6643                                         1u8.write(writer)?;
6644                                         payment_preimage.write(writer)?;
6645                                         htlc_id.write(writer)?;
6646                                 },
6647                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
6648                                         2u8.write(writer)?;
6649                                         htlc_id.write(writer)?;
6650                                         err_packet.write(writer)?;
6651                                 }
6652                         }
6653                 }
6654
6655                 match self.context.resend_order {
6656                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
6657                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
6658                 }
6659
6660                 self.context.monitor_pending_channel_ready.write(writer)?;
6661                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
6662                 self.context.monitor_pending_commitment_signed.write(writer)?;
6663
6664                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
6665                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
6666                         pending_forward.write(writer)?;
6667                         htlc_id.write(writer)?;
6668                 }
6669
6670                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
6671                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
6672                         htlc_source.write(writer)?;
6673                         payment_hash.write(writer)?;
6674                         fail_reason.write(writer)?;
6675                 }
6676
6677                 if self.context.is_outbound() {
6678                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
6679                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
6680                         Some(feerate).write(writer)?;
6681                 } else {
6682                         // As for inbound HTLCs, if the update was only announced and never committed in a
6683                         // commitment_signed, drop it.
6684                         None::<u32>.write(writer)?;
6685                 }
6686                 self.context.holding_cell_update_fee.write(writer)?;
6687
6688                 self.context.next_holder_htlc_id.write(writer)?;
6689                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
6690                 self.context.update_time_counter.write(writer)?;
6691                 self.context.feerate_per_kw.write(writer)?;
6692
6693                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
6694                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
6695                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
6696                 // consider the stale state on reload.
6697                 0u8.write(writer)?;
6698
6699                 self.context.funding_tx_confirmed_in.write(writer)?;
6700                 self.context.funding_tx_confirmation_height.write(writer)?;
6701                 self.context.short_channel_id.write(writer)?;
6702
6703                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
6704                 self.context.holder_dust_limit_satoshis.write(writer)?;
6705                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
6706
6707                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
6708                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
6709
6710                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
6711                 self.context.holder_htlc_minimum_msat.write(writer)?;
6712                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
6713
6714                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
6715                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
6716
6717                 match &self.context.counterparty_forwarding_info {
6718                         Some(info) => {
6719                                 1u8.write(writer)?;
6720                                 info.fee_base_msat.write(writer)?;
6721                                 info.fee_proportional_millionths.write(writer)?;
6722                                 info.cltv_expiry_delta.write(writer)?;
6723                         },
6724                         None => 0u8.write(writer)?
6725                 }
6726
6727                 self.context.channel_transaction_parameters.write(writer)?;
6728                 self.context.funding_transaction.write(writer)?;
6729
6730                 self.context.counterparty_cur_commitment_point.write(writer)?;
6731                 self.context.counterparty_prev_commitment_point.write(writer)?;
6732                 self.context.counterparty_node_id.write(writer)?;
6733
6734                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
6735
6736                 self.context.commitment_secrets.write(writer)?;
6737
6738                 self.context.channel_update_status.write(writer)?;
6739
6740                 #[cfg(any(test, fuzzing))]
6741                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
6742                 #[cfg(any(test, fuzzing))]
6743                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
6744                         htlc.write(writer)?;
6745                 }
6746
6747                 // If the channel type is something other than only-static-remote-key, then we need to have
6748                 // older clients fail to deserialize this channel at all. If the type is
6749                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
6750                 // out at all.
6751                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
6752                         Some(&self.context.channel_type) } else { None };
6753
6754                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
6755                 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
6756                 // a different percentage of the channel value then 10%, which older versions of LDK used
6757                 // to set it to before the percentage was made configurable.
6758                 let serialized_holder_selected_reserve =
6759                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
6760                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
6761
6762                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
6763                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
6764                 let serialized_holder_htlc_max_in_flight =
6765                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
6766                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
6767
6768                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
6769                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
6770
6771                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6772                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
6773                 // we write the high bytes as an option here.
6774                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
6775
6776                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
6777
6778                 write_tlv_fields!(writer, {
6779                         (0, self.context.announcement_sigs, option),
6780                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
6781                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
6782                         // them twice, once with their original default values above, and once as an option
6783                         // here. On the read side, old versions will simply ignore the odd-type entries here,
6784                         // and new versions map the default values to None and allow the TLV entries here to
6785                         // override that.
6786                         (1, self.context.minimum_depth, option),
6787                         (2, chan_type, option),
6788                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
6789                         (4, serialized_holder_selected_reserve, option),
6790                         (5, self.context.config, required),
6791                         (6, serialized_holder_htlc_max_in_flight, option),
6792                         (7, self.context.shutdown_scriptpubkey, option),
6793                         (9, self.context.target_closing_feerate_sats_per_kw, option),
6794                         (11, self.context.monitor_pending_finalized_fulfills, vec_type),
6795                         (13, self.context.channel_creation_height, required),
6796                         (15, preimages, vec_type),
6797                         (17, self.context.announcement_sigs_state, required),
6798                         (19, self.context.latest_inbound_scid_alias, option),
6799                         (21, self.context.outbound_scid_alias, required),
6800                         (23, channel_ready_event_emitted, option),
6801                         (25, user_id_high_opt, option),
6802                         (27, self.context.channel_keys_id, required),
6803                         (28, holder_max_accepted_htlcs, option),
6804                         (29, self.context.temporary_channel_id, option),
6805                         (31, channel_pending_event_emitted, option),
6806                         (33, self.context.pending_monitor_updates, vec_type),
6807                 });
6808
6809                 Ok(())
6810         }
6811 }
6812
6813 const MAX_ALLOC_SIZE: usize = 64*1024;
6814 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<<SP::Target as SignerProvider>::Signer>
6815                 where
6816                         ES::Target: EntropySource,
6817                         SP::Target: SignerProvider
6818 {
6819         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
6820                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
6821                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
6822
6823                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6824                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
6825                 // the low bytes now and the high bytes later.
6826                 let user_id_low: u64 = Readable::read(reader)?;
6827
6828                 let mut config = Some(LegacyChannelConfig::default());
6829                 if ver == 1 {
6830                         // Read the old serialization of the ChannelConfig from version 0.0.98.
6831                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
6832                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
6833                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
6834                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
6835                 } else {
6836                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
6837                         let mut _val: u64 = Readable::read(reader)?;
6838                 }
6839
6840                 let channel_id = Readable::read(reader)?;
6841                 let channel_state = Readable::read(reader)?;
6842                 let channel_value_satoshis = Readable::read(reader)?;
6843
6844                 let latest_monitor_update_id = Readable::read(reader)?;
6845
6846                 let mut keys_data = None;
6847                 if ver <= 2 {
6848                         // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
6849                         // the `channel_keys_id` TLV is present below.
6850                         let keys_len: u32 = Readable::read(reader)?;
6851                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
6852                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
6853                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
6854                                 let mut data = [0; 1024];
6855                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
6856                                 reader.read_exact(read_slice)?;
6857                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
6858                         }
6859                 }
6860
6861                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
6862                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
6863                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
6864                         Err(_) => None,
6865                 };
6866                 let destination_script = Readable::read(reader)?;
6867
6868                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
6869                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
6870                 let value_to_self_msat = Readable::read(reader)?;
6871
6872                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
6873
6874                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
6875                 for _ in 0..pending_inbound_htlc_count {
6876                         pending_inbound_htlcs.push(InboundHTLCOutput {
6877                                 htlc_id: Readable::read(reader)?,
6878                                 amount_msat: Readable::read(reader)?,
6879                                 cltv_expiry: Readable::read(reader)?,
6880                                 payment_hash: Readable::read(reader)?,
6881                                 state: match <u8 as Readable>::read(reader)? {
6882                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
6883                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
6884                                         3 => InboundHTLCState::Committed,
6885                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
6886                                         _ => return Err(DecodeError::InvalidValue),
6887                                 },
6888                         });
6889                 }
6890
6891                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
6892                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
6893                 for _ in 0..pending_outbound_htlc_count {
6894                         pending_outbound_htlcs.push(OutboundHTLCOutput {
6895                                 htlc_id: Readable::read(reader)?,
6896                                 amount_msat: Readable::read(reader)?,
6897                                 cltv_expiry: Readable::read(reader)?,
6898                                 payment_hash: Readable::read(reader)?,
6899                                 source: Readable::read(reader)?,
6900                                 state: match <u8 as Readable>::read(reader)? {
6901                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
6902                                         1 => OutboundHTLCState::Committed,
6903                                         2 => {
6904                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6905                                                 OutboundHTLCState::RemoteRemoved(option.into())
6906                                         },
6907                                         3 => {
6908                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6909                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
6910                                         },
6911                                         4 => {
6912                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6913                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
6914                                         },
6915                                         _ => return Err(DecodeError::InvalidValue),
6916                                 },
6917                         });
6918                 }
6919
6920                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
6921                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
6922                 for _ in 0..holding_cell_htlc_update_count {
6923                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
6924                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
6925                                         amount_msat: Readable::read(reader)?,
6926                                         cltv_expiry: Readable::read(reader)?,
6927                                         payment_hash: Readable::read(reader)?,
6928                                         source: Readable::read(reader)?,
6929                                         onion_routing_packet: Readable::read(reader)?,
6930                                 },
6931                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
6932                                         payment_preimage: Readable::read(reader)?,
6933                                         htlc_id: Readable::read(reader)?,
6934                                 },
6935                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
6936                                         htlc_id: Readable::read(reader)?,
6937                                         err_packet: Readable::read(reader)?,
6938                                 },
6939                                 _ => return Err(DecodeError::InvalidValue),
6940                         });
6941                 }
6942
6943                 let resend_order = match <u8 as Readable>::read(reader)? {
6944                         0 => RAACommitmentOrder::CommitmentFirst,
6945                         1 => RAACommitmentOrder::RevokeAndACKFirst,
6946                         _ => return Err(DecodeError::InvalidValue),
6947                 };
6948
6949                 let monitor_pending_channel_ready = Readable::read(reader)?;
6950                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
6951                 let monitor_pending_commitment_signed = Readable::read(reader)?;
6952
6953                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
6954                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
6955                 for _ in 0..monitor_pending_forwards_count {
6956                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
6957                 }
6958
6959                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
6960                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
6961                 for _ in 0..monitor_pending_failures_count {
6962                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
6963                 }
6964
6965                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
6966
6967                 let holding_cell_update_fee = Readable::read(reader)?;
6968
6969                 let next_holder_htlc_id = Readable::read(reader)?;
6970                 let next_counterparty_htlc_id = Readable::read(reader)?;
6971                 let update_time_counter = Readable::read(reader)?;
6972                 let feerate_per_kw = Readable::read(reader)?;
6973
6974                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
6975                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
6976                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
6977                 // consider the stale state on reload.
6978                 match <u8 as Readable>::read(reader)? {
6979                         0 => {},
6980                         1 => {
6981                                 let _: u32 = Readable::read(reader)?;
6982                                 let _: u64 = Readable::read(reader)?;
6983                                 let _: Signature = Readable::read(reader)?;
6984                         },
6985                         _ => return Err(DecodeError::InvalidValue),
6986                 }
6987
6988                 let funding_tx_confirmed_in = Readable::read(reader)?;
6989                 let funding_tx_confirmation_height = Readable::read(reader)?;
6990                 let short_channel_id = Readable::read(reader)?;
6991
6992                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
6993                 let holder_dust_limit_satoshis = Readable::read(reader)?;
6994                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
6995                 let mut counterparty_selected_channel_reserve_satoshis = None;
6996                 if ver == 1 {
6997                         // Read the old serialization from version 0.0.98.
6998                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
6999                 } else {
7000                         // Read the 8 bytes of backwards-compatibility data.
7001                         let _dummy: u64 = Readable::read(reader)?;
7002                 }
7003                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7004                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7005                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7006
7007                 let mut minimum_depth = None;
7008                 if ver == 1 {
7009                         // Read the old serialization from version 0.0.98.
7010                         minimum_depth = Some(Readable::read(reader)?);
7011                 } else {
7012                         // Read the 4 bytes of backwards-compatibility data.
7013                         let _dummy: u32 = Readable::read(reader)?;
7014                 }
7015
7016                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7017                         0 => None,
7018                         1 => Some(CounterpartyForwardingInfo {
7019                                 fee_base_msat: Readable::read(reader)?,
7020                                 fee_proportional_millionths: Readable::read(reader)?,
7021                                 cltv_expiry_delta: Readable::read(reader)?,
7022                         }),
7023                         _ => return Err(DecodeError::InvalidValue),
7024                 };
7025
7026                 let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7027                 let funding_transaction = Readable::read(reader)?;
7028
7029                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7030
7031                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7032                 let counterparty_node_id = Readable::read(reader)?;
7033
7034                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7035                 let commitment_secrets = Readable::read(reader)?;
7036
7037                 let channel_update_status = Readable::read(reader)?;
7038
7039                 #[cfg(any(test, fuzzing))]
7040                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7041                 #[cfg(any(test, fuzzing))]
7042                 {
7043                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7044                         for _ in 0..htlc_fulfills_len {
7045                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7046                         }
7047                 }
7048
7049                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7050                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7051                                 FeeUpdateState::Outbound
7052                         } else {
7053                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7054                         }))
7055                 } else {
7056                         None
7057                 };
7058
7059                 let mut announcement_sigs = None;
7060                 let mut target_closing_feerate_sats_per_kw = None;
7061                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7062                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7063                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7064                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7065                 // only, so we default to that if none was written.
7066                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7067                 let mut channel_creation_height = Some(serialized_height);
7068                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7069
7070                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7071                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7072                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7073                 let mut latest_inbound_scid_alias = None;
7074                 let mut outbound_scid_alias = None;
7075                 let mut channel_pending_event_emitted = None;
7076                 let mut channel_ready_event_emitted = None;
7077
7078                 let mut user_id_high_opt: Option<u64> = None;
7079                 let mut channel_keys_id: Option<[u8; 32]> = None;
7080                 let mut temporary_channel_id: Option<[u8; 32]> = None;
7081                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7082
7083                 let mut pending_monitor_updates = Some(Vec::new());
7084
7085                 read_tlv_fields!(reader, {
7086                         (0, announcement_sigs, option),
7087                         (1, minimum_depth, option),
7088                         (2, channel_type, option),
7089                         (3, counterparty_selected_channel_reserve_satoshis, option),
7090                         (4, holder_selected_channel_reserve_satoshis, option),
7091                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7092                         (6, holder_max_htlc_value_in_flight_msat, option),
7093                         (7, shutdown_scriptpubkey, option),
7094                         (9, target_closing_feerate_sats_per_kw, option),
7095                         (11, monitor_pending_finalized_fulfills, vec_type),
7096                         (13, channel_creation_height, option),
7097                         (15, preimages_opt, vec_type),
7098                         (17, announcement_sigs_state, option),
7099                         (19, latest_inbound_scid_alias, option),
7100                         (21, outbound_scid_alias, option),
7101                         (23, channel_ready_event_emitted, option),
7102                         (25, user_id_high_opt, option),
7103                         (27, channel_keys_id, option),
7104                         (28, holder_max_accepted_htlcs, option),
7105                         (29, temporary_channel_id, option),
7106                         (31, channel_pending_event_emitted, option),
7107                         (33, pending_monitor_updates, vec_type),
7108                 });
7109
7110                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7111                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7112                         // If we've gotten to the funding stage of the channel, populate the signer with its
7113                         // required channel parameters.
7114                         let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7115                         if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
7116                                 holder_signer.provide_channel_parameters(&channel_parameters);
7117                         }
7118                         (channel_keys_id, holder_signer)
7119                 } else {
7120                         // `keys_data` can be `None` if we had corrupted data.
7121                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7122                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7123                         (holder_signer.channel_keys_id(), holder_signer)
7124                 };
7125
7126                 if let Some(preimages) = preimages_opt {
7127                         let mut iter = preimages.into_iter();
7128                         for htlc in pending_outbound_htlcs.iter_mut() {
7129                                 match &htlc.state {
7130                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7131                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7132                                         }
7133                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7134                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7135                                         }
7136                                         _ => {}
7137                                 }
7138                         }
7139                         // We expect all preimages to be consumed above
7140                         if iter.next().is_some() {
7141                                 return Err(DecodeError::InvalidValue);
7142                         }
7143                 }
7144
7145                 let chan_features = channel_type.as_ref().unwrap();
7146                 if !chan_features.is_subset(our_supported_features) {
7147                         // If the channel was written by a new version and negotiated with features we don't
7148                         // understand yet, refuse to read it.
7149                         return Err(DecodeError::UnknownRequiredFeature);
7150                 }
7151
7152                 let mut secp_ctx = Secp256k1::new();
7153                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7154
7155                 // `user_id` used to be a single u64 value. In order to remain backwards
7156                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7157                 // separate u64 values.
7158                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7159
7160                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7161
7162                 Ok(Channel {
7163                         context: ChannelContext {
7164                                 user_id,
7165
7166                                 config: config.unwrap(),
7167
7168                                 prev_config: None,
7169
7170                                 // Note that we don't care about serializing handshake limits as we only ever serialize
7171                                 // channel data after the handshake has completed.
7172                                 inbound_handshake_limits_override: None,
7173
7174                                 channel_id,
7175                                 temporary_channel_id,
7176                                 channel_state,
7177                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
7178                                 secp_ctx,
7179                                 channel_value_satoshis,
7180
7181                                 latest_monitor_update_id,
7182
7183                                 holder_signer,
7184                                 shutdown_scriptpubkey,
7185                                 destination_script,
7186
7187                                 cur_holder_commitment_transaction_number,
7188                                 cur_counterparty_commitment_transaction_number,
7189                                 value_to_self_msat,
7190
7191                                 holder_max_accepted_htlcs,
7192                                 pending_inbound_htlcs,
7193                                 pending_outbound_htlcs,
7194                                 holding_cell_htlc_updates,
7195
7196                                 resend_order,
7197
7198                                 monitor_pending_channel_ready,
7199                                 monitor_pending_revoke_and_ack,
7200                                 monitor_pending_commitment_signed,
7201                                 monitor_pending_forwards,
7202                                 monitor_pending_failures,
7203                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7204
7205                                 pending_update_fee,
7206                                 holding_cell_update_fee,
7207                                 next_holder_htlc_id,
7208                                 next_counterparty_htlc_id,
7209                                 update_time_counter,
7210                                 feerate_per_kw,
7211
7212                                 #[cfg(debug_assertions)]
7213                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7214                                 #[cfg(debug_assertions)]
7215                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7216
7217                                 last_sent_closing_fee: None,
7218                                 pending_counterparty_closing_signed: None,
7219                                 closing_fee_limits: None,
7220                                 target_closing_feerate_sats_per_kw,
7221
7222                                 inbound_awaiting_accept: false,
7223
7224                                 funding_tx_confirmed_in,
7225                                 funding_tx_confirmation_height,
7226                                 short_channel_id,
7227                                 channel_creation_height: channel_creation_height.unwrap(),
7228
7229                                 counterparty_dust_limit_satoshis,
7230                                 holder_dust_limit_satoshis,
7231                                 counterparty_max_htlc_value_in_flight_msat,
7232                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7233                                 counterparty_selected_channel_reserve_satoshis,
7234                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7235                                 counterparty_htlc_minimum_msat,
7236                                 holder_htlc_minimum_msat,
7237                                 counterparty_max_accepted_htlcs,
7238                                 minimum_depth,
7239
7240                                 counterparty_forwarding_info,
7241
7242                                 channel_transaction_parameters: channel_parameters,
7243                                 funding_transaction,
7244
7245                                 counterparty_cur_commitment_point,
7246                                 counterparty_prev_commitment_point,
7247                                 counterparty_node_id,
7248
7249                                 counterparty_shutdown_scriptpubkey,
7250
7251                                 commitment_secrets,
7252
7253                                 channel_update_status,
7254                                 closing_signed_in_flight: false,
7255
7256                                 announcement_sigs,
7257
7258                                 #[cfg(any(test, fuzzing))]
7259                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7260                                 #[cfg(any(test, fuzzing))]
7261                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7262
7263                                 workaround_lnd_bug_4006: None,
7264                                 sent_message_awaiting_response: None,
7265
7266                                 latest_inbound_scid_alias,
7267                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7268                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7269
7270                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7271                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7272
7273                                 #[cfg(any(test, fuzzing))]
7274                                 historical_inbound_htlc_fulfills,
7275
7276                                 channel_type: channel_type.unwrap(),
7277                                 channel_keys_id,
7278
7279                                 pending_monitor_updates: pending_monitor_updates.unwrap(),
7280                         }
7281                 })
7282         }
7283 }
7284
7285 #[cfg(test)]
7286 mod tests {
7287         use std::cmp;
7288         use bitcoin::blockdata::script::{Script, Builder};
7289         use bitcoin::blockdata::transaction::{Transaction, TxOut};
7290         use bitcoin::blockdata::constants::genesis_block;
7291         use bitcoin::blockdata::opcodes;
7292         use bitcoin::network::constants::Network;
7293         use hex;
7294         use crate::ln::PaymentHash;
7295         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7296         #[cfg(anchors)]
7297         use crate::ln::channel::InitFeatures;
7298         use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7299         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7300         use crate::ln::features::ChannelTypeFeatures;
7301         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7302         use crate::ln::script::ShutdownScript;
7303         use crate::ln::chan_utils;
7304         use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7305         use crate::chain::BestBlock;
7306         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7307         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7308         use crate::chain::transaction::OutPoint;
7309         use crate::routing::router::Path;
7310         use crate::util::config::UserConfig;
7311         use crate::util::enforcing_trait_impls::EnforcingSigner;
7312         use crate::util::errors::APIError;
7313         use crate::util::test_utils;
7314         use crate::util::test_utils::OnGetShutdownScriptpubkey;
7315         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7316         use bitcoin::secp256k1::ffi::Signature as FFISignature;
7317         use bitcoin::secp256k1::{SecretKey,PublicKey};
7318         use bitcoin::hashes::sha256::Hash as Sha256;
7319         use bitcoin::hashes::Hash;
7320         use bitcoin::hash_types::WPubkeyHash;
7321         use bitcoin::PackedLockTime;
7322         use bitcoin::util::address::WitnessVersion;
7323         use crate::prelude::*;
7324
7325         struct TestFeeEstimator {
7326                 fee_est: u32
7327         }
7328         impl FeeEstimator for TestFeeEstimator {
7329                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7330                         self.fee_est
7331                 }
7332         }
7333
7334         #[test]
7335         fn test_max_funding_satoshis_no_wumbo() {
7336                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7337                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7338                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7339         }
7340
7341         #[test]
7342         fn test_no_fee_check_overflow() {
7343                 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7344                 // arithmetic, causing a panic with debug assertions enabled.
7345                 let fee_est = TestFeeEstimator { fee_est: 42 };
7346                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7347                 assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator,
7348                         u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
7349         }
7350
7351         struct Keys {
7352                 signer: InMemorySigner,
7353         }
7354
7355         impl EntropySource for Keys {
7356                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7357         }
7358
7359         impl SignerProvider for Keys {
7360                 type Signer = InMemorySigner;
7361
7362                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7363                         self.signer.channel_keys_id()
7364                 }
7365
7366                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7367                         self.signer.clone()
7368                 }
7369
7370                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
7371
7372                 fn get_destination_script(&self) -> Result<Script, ()> {
7373                         let secp_ctx = Secp256k1::signing_only();
7374                         let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7375                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7376                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
7377                 }
7378
7379                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7380                         let secp_ctx = Secp256k1::signing_only();
7381                         let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7382                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7383                 }
7384         }
7385
7386         #[cfg(not(feature = "grind_signatures"))]
7387         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7388                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
7389         }
7390
7391         #[test]
7392         fn upfront_shutdown_script_incompatibility() {
7393                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7394                 let non_v0_segwit_shutdown_script =
7395                         ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7396
7397                 let seed = [42; 32];
7398                 let network = Network::Testnet;
7399                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7400                 keys_provider.expect(OnGetShutdownScriptpubkey {
7401                         returns: non_v0_segwit_shutdown_script.clone(),
7402                 });
7403
7404                 let secp_ctx = Secp256k1::new();
7405                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7406                 let config = UserConfig::default();
7407                 match OutboundV1Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7408                         Err(APIError::IncompatibleShutdownScript { script }) => {
7409                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7410                         },
7411                         Err(e) => panic!("Unexpected error: {:?}", e),
7412                         Ok(_) => panic!("Expected error"),
7413                 }
7414         }
7415
7416         // Check that, during channel creation, we use the same feerate in the open channel message
7417         // as we do in the Channel object creation itself.
7418         #[test]
7419         fn test_open_channel_msg_fee() {
7420                 let original_fee = 253;
7421                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7422                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7423                 let secp_ctx = Secp256k1::new();
7424                 let seed = [42; 32];
7425                 let network = Network::Testnet;
7426                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7427
7428                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7429                 let config = UserConfig::default();
7430                 let node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7431
7432                 // Now change the fee so we can check that the fee in the open_channel message is the
7433                 // same as the old fee.
7434                 fee_est.fee_est = 500;
7435                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7436                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7437         }
7438
7439         #[test]
7440         fn test_holder_vs_counterparty_dust_limit() {
7441                 // Test that when calculating the local and remote commitment transaction fees, the correct
7442                 // dust limits are used.
7443                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7444                 let secp_ctx = Secp256k1::new();
7445                 let seed = [42; 32];
7446                 let network = Network::Testnet;
7447                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7448                 let logger = test_utils::TestLogger::new();
7449
7450                 // Go through the flow of opening a channel between two nodes, making sure
7451                 // they have different dust limits.
7452
7453                 // Create Node A's channel pointing to Node B's pubkey
7454                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7455                 let config = UserConfig::default();
7456                 let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7457
7458                 // Create Node B's channel by receiving Node A's open_channel message
7459                 // Make sure A's dust limit is as we expect.
7460                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7461                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7462                 let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
7463
7464                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
7465                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
7466                 accept_channel_msg.dust_limit_satoshis = 546;
7467                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
7468                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
7469
7470                 // Put some inbound and outbound HTLCs in A's channel.
7471                 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
7472                 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
7473                         htlc_id: 0,
7474                         amount_msat: htlc_amount_msat,
7475                         payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
7476                         cltv_expiry: 300000000,
7477                         state: InboundHTLCState::Committed,
7478                 });
7479
7480                 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
7481                         htlc_id: 1,
7482                         amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
7483                         payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
7484                         cltv_expiry: 200000000,
7485                         state: OutboundHTLCState::Committed,
7486                         source: HTLCSource::OutboundRoute {
7487                                 path: Path { hops: Vec::new(), blinded_tail: None },
7488                                 session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7489                                 first_hop_htlc_msat: 548,
7490                                 payment_id: PaymentId([42; 32]),
7491                         }
7492                 });
7493
7494                 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
7495                 // the dust limit check.
7496                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7497                 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7498                 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors());
7499                 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
7500
7501                 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
7502                 // of the HTLCs are seen to be above the dust limit.
7503                 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
7504                 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors());
7505                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7506                 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7507                 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
7508         }
7509
7510         #[test]
7511         fn test_timeout_vs_success_htlc_dust_limit() {
7512                 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
7513                 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
7514                 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
7515                 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
7516                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
7517                 let secp_ctx = Secp256k1::new();
7518                 let seed = [42; 32];
7519                 let network = Network::Testnet;
7520                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7521
7522                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7523                 let config = UserConfig::default();
7524                 let mut chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7525
7526                 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors());
7527                 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors());
7528
7529                 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
7530                 // counted as dust when it shouldn't be.
7531                 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
7532                 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7533                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7534                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
7535
7536                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
7537                 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
7538                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7539                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7540                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7541
7542                 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
7543
7544                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
7545                 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
7546                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7547                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7548                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7549
7550                 // If swapped: this HTLC would be counted as dust when it shouldn't be.
7551                 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
7552                 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7553                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7554                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
7555         }
7556
7557         #[test]
7558         fn channel_reestablish_no_updates() {
7559                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7560                 let logger = test_utils::TestLogger::new();
7561                 let secp_ctx = Secp256k1::new();
7562                 let seed = [42; 32];
7563                 let network = Network::Testnet;
7564                 let best_block = BestBlock::from_network(network);
7565                 let chain_hash = best_block.block_hash();
7566                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7567
7568                 // Go through the flow of opening a channel between two nodes.
7569
7570                 // Create Node A's channel pointing to Node B's pubkey
7571                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7572                 let config = UserConfig::default();
7573                 let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7574
7575                 // Create Node B's channel by receiving Node A's open_channel message
7576                 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
7577                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7578                 let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
7579
7580                 // Node B --> Node A: accept channel
7581                 let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
7582                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
7583
7584                 // Node A --> Node B: funding created
7585                 let output_script = node_a_chan.context.get_funding_redeemscript();
7586                 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
7587                         value: 10000000, script_pubkey: output_script.clone(),
7588                 }]};
7589                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
7590                 let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
7591                 let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
7592
7593                 // Node B --> Node A: funding signed
7594                 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
7595
7596                 // Now disconnect the two nodes and check that the commitment point in
7597                 // Node B's channel_reestablish message is sane.
7598                 node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
7599                 let msg = node_b_chan.get_channel_reestablish(&&logger);
7600                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
7601                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
7602                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
7603
7604                 // Check that the commitment point in Node A's channel_reestablish message
7605                 // is sane.
7606                 node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
7607                 let msg = node_a_chan.get_channel_reestablish(&&logger);
7608                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
7609                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
7610                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
7611         }
7612
7613         #[test]
7614         fn test_configured_holder_max_htlc_value_in_flight() {
7615                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7616                 let logger = test_utils::TestLogger::new();
7617                 let secp_ctx = Secp256k1::new();
7618                 let seed = [42; 32];
7619                 let network = Network::Testnet;
7620                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7621                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7622                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7623
7624                 let mut config_2_percent = UserConfig::default();
7625                 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
7626                 let mut config_99_percent = UserConfig::default();
7627                 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
7628                 let mut config_0_percent = UserConfig::default();
7629                 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
7630                 let mut config_101_percent = UserConfig::default();
7631                 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
7632
7633                 // Test that `new_outbound` creates a channel with the correct value for
7634                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
7635                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
7636                 let chan_1 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
7637                 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
7638                 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
7639
7640                 // Test with the upper bound - 1 of valid values (99%).
7641                 let chan_2 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
7642                 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
7643                 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
7644
7645                 let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
7646
7647                 // Test that `new_from_req` creates a channel with the correct value for
7648                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
7649                 // which is set to the lower bound - 1 (2%) of the `channel_value`.
7650                 let chan_3 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
7651                 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
7652                 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
7653
7654                 // Test with the upper bound - 1 of valid values (99%).
7655                 let chan_4 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
7656                 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
7657                 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
7658
7659                 // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
7660                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
7661                 let chan_5 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
7662                 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
7663                 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
7664
7665                 // Test that `new_outbound` uses the upper bound of the configurable percentage values
7666                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
7667                 // than 100.
7668                 let chan_6 = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
7669                 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
7670                 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
7671
7672                 // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
7673                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
7674                 let chan_7 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
7675                 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
7676                 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
7677
7678                 // Test that `new_from_req` uses the upper bound of the configurable percentage values
7679                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
7680                 // than 100.
7681                 let chan_8 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
7682                 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
7683                 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
7684         }
7685
7686         #[test]
7687         fn test_configured_holder_selected_channel_reserve_satoshis() {
7688
7689                 // Test that `new_outbound` and `new_from_req` create a channel with the correct
7690                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
7691                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
7692
7693                 // Test with valid but unreasonably high channel reserves
7694                 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
7695                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
7696                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
7697
7698                 // Test with calculated channel reserve less than lower bound
7699                 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7700                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
7701
7702                 // Test with invalid channel reserves since sum of both is greater than or equal
7703                 // to channel value
7704                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
7705                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
7706         }
7707
7708         fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
7709                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
7710                 let logger = test_utils::TestLogger::new();
7711                 let secp_ctx = Secp256k1::new();
7712                 let seed = [42; 32];
7713                 let network = Network::Testnet;
7714                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7715                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7716                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7717
7718
7719                 let mut outbound_node_config = UserConfig::default();
7720                 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
7721                 let chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
7722
7723                 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
7724                 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
7725
7726                 let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
7727                 let mut inbound_node_config = UserConfig::default();
7728                 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
7729
7730                 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
7731                         let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
7732
7733                         let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
7734
7735                         assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
7736                         assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
7737                 } else {
7738                         // Channel Negotiations failed
7739                         let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
7740                         assert!(result.is_err());
7741                 }
7742         }
7743
7744         #[test]
7745         fn channel_update() {
7746                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7747                 let secp_ctx = Secp256k1::new();
7748                 let seed = [42; 32];
7749                 let network = Network::Testnet;
7750                 let chain_hash = genesis_block(network).header.block_hash();
7751                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7752
7753                 // Create a channel.
7754                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7755                 let config = UserConfig::default();
7756                 let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7757                 assert!(node_a_chan.context.counterparty_forwarding_info.is_none());
7758                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default
7759                 assert!(node_a_chan.context.counterparty_forwarding_info().is_none());
7760
7761                 // Make sure that receiving a channel update will update the Channel as expected.
7762                 let update = ChannelUpdate {
7763                         contents: UnsignedChannelUpdate {
7764                                 chain_hash,
7765                                 short_channel_id: 0,
7766                                 timestamp: 0,
7767                                 flags: 0,
7768                                 cltv_expiry_delta: 100,
7769                                 htlc_minimum_msat: 5,
7770                                 htlc_maximum_msat: MAX_VALUE_MSAT,
7771                                 fee_base_msat: 110,
7772                                 fee_proportional_millionths: 11,
7773                                 excess_data: Vec::new(),
7774                         },
7775                         signature: Signature::from(unsafe { FFISignature::new() })
7776                 };
7777                 node_a_chan.channel_update(&update).unwrap();
7778
7779                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
7780                 // change our official htlc_minimum_msat.
7781                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
7782                 match node_a_chan.context.counterparty_forwarding_info() {
7783                         Some(info) => {
7784                                 assert_eq!(info.cltv_expiry_delta, 100);
7785                                 assert_eq!(info.fee_base_msat, 110);
7786                                 assert_eq!(info.fee_proportional_millionths, 11);
7787                         },
7788                         None => panic!("expected counterparty forwarding info to be Some")
7789                 }
7790         }
7791
7792         #[cfg(feature = "_test_vectors")]
7793         #[test]
7794         fn outbound_commitment_test() {
7795                 use bitcoin::util::sighash;
7796                 use bitcoin::consensus::encode::serialize;
7797                 use bitcoin::blockdata::transaction::EcdsaSighashType;
7798                 use bitcoin::hashes::hex::FromHex;
7799                 use bitcoin::hash_types::Txid;
7800                 use bitcoin::secp256k1::Message;
7801                 use crate::sign::EcdsaChannelSigner;
7802                 use crate::ln::PaymentPreimage;
7803                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
7804                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
7805                 use crate::util::logger::Logger;
7806                 use crate::sync::Arc;
7807
7808                 // Test vectors from BOLT 3 Appendices C and F (anchors):
7809                 let feeest = TestFeeEstimator{fee_est: 15000};
7810                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
7811                 let secp_ctx = Secp256k1::new();
7812
7813                 let mut signer = InMemorySigner::new(
7814                         &secp_ctx,
7815                         SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
7816                         SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7817                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
7818                         SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
7819                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
7820
7821                         // These aren't set in the test vectors:
7822                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
7823                         10_000_000,
7824                         [0; 32],
7825                         [0; 32],
7826                 );
7827
7828                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
7829                                 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
7830                 let keys_provider = Keys { signer: signer.clone() };
7831
7832                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7833                 let mut config = UserConfig::default();
7834                 config.channel_handshake_config.announced_channel = false;
7835                 let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
7836                 chan.context.holder_dust_limit_satoshis = 546;
7837                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
7838
7839                 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
7840
7841                 let counterparty_pubkeys = ChannelPublicKeys {
7842                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
7843                         revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
7844                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
7845                         delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
7846                         htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
7847                 };
7848                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
7849                         CounterpartyChannelTransactionParameters {
7850                                 pubkeys: counterparty_pubkeys.clone(),
7851                                 selected_contest_delay: 144
7852                         });
7853                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
7854                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
7855
7856                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
7857                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
7858
7859                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
7860                            hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
7861
7862                 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
7863                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
7864
7865                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
7866                 // derived from a commitment_seed, so instead we copy it here and call
7867                 // build_commitment_transaction.
7868                 let delayed_payment_base = &chan.context.holder_signer.pubkeys().delayed_payment_basepoint;
7869                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
7870                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
7871                 let htlc_basepoint = &chan.context.holder_signer.pubkeys().htlc_basepoint;
7872                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
7873
7874                 macro_rules! test_commitment {
7875                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
7876                                 chan.context.channel_transaction_parameters.opt_anchors = None;
7877                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*);
7878                         };
7879                 }
7880
7881                 macro_rules! test_commitment_with_anchors {
7882                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
7883                                 chan.context.channel_transaction_parameters.opt_anchors = Some(());
7884                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*);
7885                         };
7886                 }
7887
7888                 macro_rules! test_commitment_common {
7889                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
7890                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
7891                         } ) => { {
7892                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
7893                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
7894
7895                                         let htlcs = commitment_stats.htlcs_included.drain(..)
7896                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
7897                                                 .collect();
7898                                         (commitment_stats.tx, htlcs)
7899                                 };
7900                                 let trusted_tx = commitment_tx.trust();
7901                                 let unsigned_tx = trusted_tx.built_transaction();
7902                                 let redeemscript = chan.context.get_funding_redeemscript();
7903                                 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
7904                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
7905                                 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
7906                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
7907
7908                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
7909                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
7910                                 let mut counterparty_htlc_sigs = Vec::new();
7911                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
7912                                 $({
7913                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
7914                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
7915                                         counterparty_htlc_sigs.push(remote_signature);
7916                                 })*
7917                                 assert_eq!(htlcs.len(), per_htlc.len());
7918
7919                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7920                                         commitment_tx.clone(),
7921                                         counterparty_signature,
7922                                         counterparty_htlc_sigs,
7923                                         &chan.context.holder_signer.pubkeys().funding_pubkey,
7924                                         chan.context.counterparty_funding_pubkey()
7925                                 );
7926                                 let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
7927                                 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
7928
7929                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
7930                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
7931                                 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
7932
7933                                 // ((htlc, counterparty_sig), (index, holder_sig))
7934                                 let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
7935
7936                                 $({
7937                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
7938                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
7939
7940                                         let ref htlc = htlcs[$htlc_idx];
7941                                         let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
7942                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
7943                                                 &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
7944                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
7945                                         let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
7946                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
7947                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
7948
7949                                         let mut preimage: Option<PaymentPreimage> = None;
7950                                         if !htlc.offered {
7951                                                 for i in 0..5 {
7952                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
7953                                                         if out == htlc.payment_hash {
7954                                                                 preimage = Some(PaymentPreimage([i; 32]));
7955                                                         }
7956                                                 }
7957
7958                                                 assert!(preimage.is_some());
7959                                         }
7960
7961                                         let htlc_sig = htlc_sig_iter.next().unwrap();
7962                                         let num_anchors = if $opt_anchors { 2 } else { 0 };
7963                                         assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
7964
7965                                         let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
7966                                         assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
7967                                         let index = (htlc_sig.1).0;
7968                                         let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable();
7969                                         let trusted_tx = holder_commitment_tx.trust();
7970                                         log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))));
7971                                         assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
7972                                                         hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
7973                                 })*
7974                                 assert!(htlc_sig_iter.next().is_none());
7975                         } }
7976                 }
7977
7978                 // anchors: simple commitment tx with no HTLCs and single anchor
7979                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
7980                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
7981                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7982
7983                 // simple commitment tx with no HTLCs
7984                 chan.context.value_to_self_msat = 7000000000;
7985
7986                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
7987                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
7988                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7989
7990                 // anchors: simple commitment tx with no HTLCs
7991                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
7992                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
7993                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7994
7995                 chan.context.pending_inbound_htlcs.push({
7996                         let mut out = InboundHTLCOutput{
7997                                 htlc_id: 0,
7998                                 amount_msat: 1000000,
7999                                 cltv_expiry: 500,
8000                                 payment_hash: PaymentHash([0; 32]),
8001                                 state: InboundHTLCState::Committed,
8002                         };
8003                         out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8004                         out
8005                 });
8006                 chan.context.pending_inbound_htlcs.push({
8007                         let mut out = InboundHTLCOutput{
8008                                 htlc_id: 1,
8009                                 amount_msat: 2000000,
8010                                 cltv_expiry: 501,
8011                                 payment_hash: PaymentHash([0; 32]),
8012                                 state: InboundHTLCState::Committed,
8013                         };
8014                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8015                         out
8016                 });
8017                 chan.context.pending_outbound_htlcs.push({
8018                         let mut out = OutboundHTLCOutput{
8019                                 htlc_id: 2,
8020                                 amount_msat: 2000000,
8021                                 cltv_expiry: 502,
8022                                 payment_hash: PaymentHash([0; 32]),
8023                                 state: OutboundHTLCState::Committed,
8024                                 source: HTLCSource::dummy(),
8025                         };
8026                         out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8027                         out
8028                 });
8029                 chan.context.pending_outbound_htlcs.push({
8030                         let mut out = OutboundHTLCOutput{
8031                                 htlc_id: 3,
8032                                 amount_msat: 3000000,
8033                                 cltv_expiry: 503,
8034                                 payment_hash: PaymentHash([0; 32]),
8035                                 state: OutboundHTLCState::Committed,
8036                                 source: HTLCSource::dummy(),
8037                         };
8038                         out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8039                         out
8040                 });
8041                 chan.context.pending_inbound_htlcs.push({
8042                         let mut out = InboundHTLCOutput{
8043                                 htlc_id: 4,
8044                                 amount_msat: 4000000,
8045                                 cltv_expiry: 504,
8046                                 payment_hash: PaymentHash([0; 32]),
8047                                 state: InboundHTLCState::Committed,
8048                         };
8049                         out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8050                         out
8051                 });
8052
8053                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8054                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8055                 chan.context.feerate_per_kw = 0;
8056
8057                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8058                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8059                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8060
8061                                   { 0,
8062                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8063                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8064                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8065
8066                                   { 1,
8067                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8068                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8069                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8070
8071                                   { 2,
8072                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8073                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8074                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8075
8076                                   { 3,
8077                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8078                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8079                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8080
8081                                   { 4,
8082                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8083                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8084                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8085                 } );
8086
8087                 // commitment tx with seven outputs untrimmed (maximum feerate)
8088                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8089                 chan.context.feerate_per_kw = 647;
8090
8091                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8092                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8093                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8094
8095                                   { 0,
8096                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8097                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8098                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8099
8100                                   { 1,
8101                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8102                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8103                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8104
8105                                   { 2,
8106                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8107                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8108                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8109
8110                                   { 3,
8111                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8112                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8113                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8114
8115                                   { 4,
8116                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8117                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8118                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8119                 } );
8120
8121                 // commitment tx with six outputs untrimmed (minimum feerate)
8122                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8123                 chan.context.feerate_per_kw = 648;
8124
8125                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8126                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8127                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8128
8129                                   { 0,
8130                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8131                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8132                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8133
8134                                   { 1,
8135                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8136                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8137                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8138
8139                                   { 2,
8140                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8141                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8142                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8143
8144                                   { 3,
8145                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8146                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8147                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8148                 } );
8149
8150                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8151                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8152                 chan.context.feerate_per_kw = 645;
8153                 chan.context.holder_dust_limit_satoshis = 1001;
8154
8155                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8156                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8157                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8158
8159                                   { 0,
8160                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8161                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8162                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8163
8164                                   { 1,
8165                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8166                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8167                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8168
8169                                   { 2,
8170                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8171                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8172                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8173
8174                                   { 3,
8175                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8176                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8177                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8178                 } );
8179
8180                 // commitment tx with six outputs untrimmed (maximum feerate)
8181                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8182                 chan.context.feerate_per_kw = 2069;
8183                 chan.context.holder_dust_limit_satoshis = 546;
8184
8185                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8186                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8187                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8188
8189                                   { 0,
8190                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8191                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8192                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8193
8194                                   { 1,
8195                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8196                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8197                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8198
8199                                   { 2,
8200                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8201                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8202                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8203
8204                                   { 3,
8205                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8206                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8207                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8208                 } );
8209
8210                 // commitment tx with five outputs untrimmed (minimum feerate)
8211                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8212                 chan.context.feerate_per_kw = 2070;
8213
8214                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8215                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8216                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8217
8218                                   { 0,
8219                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8220                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8221                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8222
8223                                   { 1,
8224                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8225                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8226                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8227
8228                                   { 2,
8229                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8230                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8231                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8232                 } );
8233
8234                 // commitment tx with five outputs untrimmed (maximum feerate)
8235                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8236                 chan.context.feerate_per_kw = 2194;
8237
8238                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8239                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8240                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8241
8242                                   { 0,
8243                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8244                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8245                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8246
8247                                   { 1,
8248                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8249                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8250                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8251
8252                                   { 2,
8253                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8254                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8255                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8256                 } );
8257
8258                 // commitment tx with four outputs untrimmed (minimum feerate)
8259                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8260                 chan.context.feerate_per_kw = 2195;
8261
8262                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8263                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8264                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8265
8266                                   { 0,
8267                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8268                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8269                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8270
8271                                   { 1,
8272                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8273                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8274                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8275                 } );
8276
8277                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8278                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8279                 chan.context.feerate_per_kw = 2185;
8280                 chan.context.holder_dust_limit_satoshis = 2001;
8281
8282                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8283                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8284                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8285
8286                                   { 0,
8287                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8288                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8289                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8290
8291                                   { 1,
8292                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8293                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8294                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8295                 } );
8296
8297                 // commitment tx with four outputs untrimmed (maximum feerate)
8298                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8299                 chan.context.feerate_per_kw = 3702;
8300                 chan.context.holder_dust_limit_satoshis = 546;
8301
8302                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8303                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8304                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8305
8306                                   { 0,
8307                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8308                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8309                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8310
8311                                   { 1,
8312                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8313                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8314                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8315                 } );
8316
8317                 // commitment tx with three outputs untrimmed (minimum feerate)
8318                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8319                 chan.context.feerate_per_kw = 3703;
8320
8321                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8322                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8323                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8324
8325                                   { 0,
8326                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8327                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8328                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8329                 } );
8330
8331                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8332                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8333                 chan.context.feerate_per_kw = 3687;
8334                 chan.context.holder_dust_limit_satoshis = 3001;
8335
8336                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8337                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8338                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8339
8340                                   { 0,
8341                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8342                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8343                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8344                 } );
8345
8346                 // commitment tx with three outputs untrimmed (maximum feerate)
8347                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8348                 chan.context.feerate_per_kw = 4914;
8349                 chan.context.holder_dust_limit_satoshis = 546;
8350
8351                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8352                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8353                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8354
8355                                   { 0,
8356                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8357                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8358                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8359                 } );
8360
8361                 // commitment tx with two outputs untrimmed (minimum feerate)
8362                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8363                 chan.context.feerate_per_kw = 4915;
8364                 chan.context.holder_dust_limit_satoshis = 546;
8365
8366                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8367                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8368                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8369
8370                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8371                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8372                 chan.context.feerate_per_kw = 4894;
8373                 chan.context.holder_dust_limit_satoshis = 4001;
8374
8375                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8376                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8377                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8378
8379                 // commitment tx with two outputs untrimmed (maximum feerate)
8380                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8381                 chan.context.feerate_per_kw = 9651180;
8382                 chan.context.holder_dust_limit_satoshis = 546;
8383
8384                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8385                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8386                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8387
8388                 // commitment tx with one output untrimmed (minimum feerate)
8389                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8390                 chan.context.feerate_per_kw = 9651181;
8391
8392                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8393                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8394                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8395
8396                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8397                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8398                 chan.context.feerate_per_kw = 6216010;
8399                 chan.context.holder_dust_limit_satoshis = 4001;
8400
8401                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8402                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8403                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8404
8405                 // commitment tx with fee greater than funder amount
8406                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8407                 chan.context.feerate_per_kw = 9651936;
8408                 chan.context.holder_dust_limit_satoshis = 546;
8409
8410                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8411                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8412                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8413
8414                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8415                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8416                 chan.context.feerate_per_kw = 253;
8417                 chan.context.pending_inbound_htlcs.clear();
8418                 chan.context.pending_inbound_htlcs.push({
8419                         let mut out = InboundHTLCOutput{
8420                                 htlc_id: 1,
8421                                 amount_msat: 2000000,
8422                                 cltv_expiry: 501,
8423                                 payment_hash: PaymentHash([0; 32]),
8424                                 state: InboundHTLCState::Committed,
8425                         };
8426                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8427                         out
8428                 });
8429                 chan.context.pending_outbound_htlcs.clear();
8430                 chan.context.pending_outbound_htlcs.push({
8431                         let mut out = OutboundHTLCOutput{
8432                                 htlc_id: 6,
8433                                 amount_msat: 5000001,
8434                                 cltv_expiry: 506,
8435                                 payment_hash: PaymentHash([0; 32]),
8436                                 state: OutboundHTLCState::Committed,
8437                                 source: HTLCSource::dummy(),
8438                         };
8439                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8440                         out
8441                 });
8442                 chan.context.pending_outbound_htlcs.push({
8443                         let mut out = OutboundHTLCOutput{
8444                                 htlc_id: 5,
8445                                 amount_msat: 5000000,
8446                                 cltv_expiry: 505,
8447                                 payment_hash: PaymentHash([0; 32]),
8448                                 state: OutboundHTLCState::Committed,
8449                                 source: HTLCSource::dummy(),
8450                         };
8451                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8452                         out
8453                 });
8454
8455                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8456                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8457                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8458
8459                                   { 0,
8460                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8461                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8462                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8463                                   { 1,
8464                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8465                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8466                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8467                                   { 2,
8468                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8469                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8470                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8471                 } );
8472
8473                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8474                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8475                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8476
8477                                   { 0,
8478                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8479                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8480                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8481                                   { 1,
8482                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8483                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8484                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8485                                   { 2,
8486                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8487                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8488                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8489                 } );
8490         }
8491
8492         #[test]
8493         fn test_per_commitment_secret_gen() {
8494                 // Test vectors from BOLT 3 Appendix D:
8495
8496                 let mut seed = [0; 32];
8497                 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8498                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8499                            hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
8500
8501                 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8502                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8503                            hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
8504
8505                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8506                            hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8507
8508                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8509                            hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
8510
8511                 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8512                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8513                            hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8514         }
8515
8516         #[test]
8517         fn test_key_derivation() {
8518                 // Test vectors from BOLT 3 Appendix E:
8519                 let secp_ctx = Secp256k1::new();
8520
8521                 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8522                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8523
8524                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8525                 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8526
8527                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8528                 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
8529
8530                 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8531                                 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
8532
8533                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8534                                 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
8535
8536                 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8537                                 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
8538
8539                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8540                                 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8541         }
8542
8543         #[test]
8544         fn test_zero_conf_channel_type_support() {
8545                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8546                 let secp_ctx = Secp256k1::new();
8547                 let seed = [42; 32];
8548                 let network = Network::Testnet;
8549                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8550                 let logger = test_utils::TestLogger::new();
8551
8552                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8553                 let config = UserConfig::default();
8554                 let node_a_chan = OutboundV1Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider,
8555                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8556
8557                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8558                 channel_type_features.set_zero_conf_required();
8559
8560                 let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8561                 open_channel_msg.channel_type = Some(channel_type_features);
8562                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8563                 let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
8564                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
8565                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
8566                 assert!(res.is_ok());
8567         }
8568
8569         #[cfg(anchors)]
8570         #[test]
8571         fn test_supports_anchors_zero_htlc_tx_fee() {
8572                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
8573                 // resulting `channel_type`.
8574                 let secp_ctx = Secp256k1::new();
8575                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8576                 let network = Network::Testnet;
8577                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8578                 let logger = test_utils::TestLogger::new();
8579
8580                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8581                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8582
8583                 let mut config = UserConfig::default();
8584                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
8585
8586                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
8587                 // need to signal it.
8588                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8589                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8590                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
8591                         &config, 0, 42
8592                 ).unwrap();
8593                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
8594
8595                 let mut expected_channel_type = ChannelTypeFeatures::empty();
8596                 expected_channel_type.set_static_remote_key_required();
8597                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
8598
8599                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8600                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8601                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8602                 ).unwrap();
8603
8604                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8605                 let channel_b = Channel::<EnforcingSigner>::new_from_req(
8606                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8607                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
8608                         &open_channel_msg, 7, &config, 0, &&logger, 42
8609                 ).unwrap();
8610
8611                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
8612                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
8613         }
8614
8615         #[cfg(anchors)]
8616         #[test]
8617         fn test_rejects_implicit_simple_anchors() {
8618                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
8619                 // each side's `InitFeatures`, it is rejected.
8620                 let secp_ctx = Secp256k1::new();
8621                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8622                 let network = Network::Testnet;
8623                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8624                 let logger = test_utils::TestLogger::new();
8625
8626                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8627                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8628
8629                 let config = UserConfig::default();
8630
8631                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
8632                 let static_remote_key_required: u64 = 1 << 12;
8633                 let simple_anchors_required: u64 = 1 << 20;
8634                 let raw_init_features = static_remote_key_required | simple_anchors_required;
8635                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
8636
8637                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8638                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8639                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8640                 ).unwrap();
8641
8642                 // Set `channel_type` to `None` to force the implicit feature negotiation.
8643                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8644                 open_channel_msg.channel_type = None;
8645
8646                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
8647                 // `static_remote_key`, it will fail the channel.
8648                 let channel_b = Channel::<EnforcingSigner>::new_from_req(
8649                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8650                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
8651                         &open_channel_msg, 7, &config, 0, &&logger, 42
8652                 );
8653                 assert!(channel_b.is_err());
8654         }
8655
8656         #[cfg(anchors)]
8657         #[test]
8658         fn test_rejects_simple_anchors_channel_type() {
8659                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
8660                 // it is rejected.
8661                 let secp_ctx = Secp256k1::new();
8662                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8663                 let network = Network::Testnet;
8664                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8665                 let logger = test_utils::TestLogger::new();
8666
8667                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8668                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8669
8670                 let config = UserConfig::default();
8671
8672                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
8673                 let static_remote_key_required: u64 = 1 << 12;
8674                 let simple_anchors_required: u64 = 1 << 20;
8675                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
8676                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
8677                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
8678                 assert!(simple_anchors_init.requires_unknown_bits());
8679                 assert!(simple_anchors_channel_type.requires_unknown_bits());
8680
8681                 // First, we'll try to open a channel between A and B where A requests a channel type for
8682                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
8683                 // B as it's not supported by LDK.
8684                 let channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8685                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8686                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8687                 ).unwrap();
8688
8689                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8690                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
8691
8692                 let res = Channel::<EnforcingSigner>::new_from_req(
8693                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8694                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
8695                         &open_channel_msg, 7, &config, 0, &&logger, 42
8696                 );
8697                 assert!(res.is_err());
8698
8699                 // Then, we'll try to open another channel where A requests a channel type for
8700                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
8701                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
8702                 // LDK.
8703                 let mut channel_a = OutboundV1Channel::<EnforcingSigner>::new_outbound(
8704                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
8705                         10000000, 100000, 42, &config, 0, 42
8706                 ).unwrap();
8707
8708                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8709
8710                 let channel_b = Channel::<EnforcingSigner>::new_from_req(
8711                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8712                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
8713                         &open_channel_msg, 7, &config, 0, &&logger, 42
8714                 ).unwrap();
8715
8716                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
8717                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
8718
8719                 let res = channel_a.accept_channel(
8720                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
8721                 );
8722                 assert!(res.is_err());
8723         }
8724 }