Handling for sign_counterparty_commitment failing during normal op
[rust-lightning] / lightning / src / ln / channel.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script,Builder};
12 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
13 use bitcoin::util::sighash;
14 use bitcoin::consensus::encode;
15
16 use bitcoin::hashes::Hash;
17 use bitcoin::hashes::sha256::Hash as Sha256;
18 use bitcoin::hashes::sha256d::Hash as Sha256d;
19 use bitcoin::hash_types::{Txid, BlockHash};
20
21 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
22 use bitcoin::secp256k1::{PublicKey,SecretKey};
23 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
24 use bitcoin::secp256k1;
25
26 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
27 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
28 use crate::ln::msgs;
29 use crate::ln::msgs::DecodeError;
30 use crate::ln::script::{self, ShutdownScript};
31 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
32 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
33 use crate::ln::chan_utils;
34 use crate::ln::onion_utils::HTLCFailReason;
35 use crate::chain::BestBlock;
36 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
37 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
38 use crate::chain::transaction::{OutPoint, TransactionData};
39 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
40 use crate::events::ClosureReason;
41 use crate::routing::gossip::NodeId;
42 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
43 use crate::util::logger::Logger;
44 use crate::util::errors::APIError;
45 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
46 use crate::util::scid_utils::scid_from_parts;
47
48 use crate::io;
49 use crate::prelude::*;
50 use core::{cmp,mem,fmt};
51 use core::ops::Deref;
52 #[cfg(any(test, fuzzing, debug_assertions))]
53 use crate::sync::Mutex;
54 use bitcoin::hashes::hex::ToHex;
55 use crate::sign::type_resolver::ChannelSignerType;
56
/// A snapshot of a channel's value/balance bookkeeping, exposed for tests only.
#[cfg(test)]
pub struct ChannelValueStat {
	/// The portion of the channel's funds which belongs to us, in msat.
	pub value_to_self_msat: u64,
	/// The total value of the channel, in msat.
	pub channel_value_msat: u64,
	/// The channel reserve value, in msat.
	pub channel_reserve_msat: u64,
	/// Total value of pending HTLCs we have offered, in msat.
	pub pending_outbound_htlcs_amount_msat: u64,
	/// Total value of pending HTLCs our counterparty has offered, in msat.
	pub pending_inbound_htlcs_amount_msat: u64,
	/// Total value of outbound HTLC adds currently sitting in the holding cell, in msat.
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	/// The counterparty's dust limit, in msat.
	pub counterparty_dust_limit_msat: u64,
}
68
/// A set of balances and HTLC limits describing what a channel can currently send and receive.
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
81
/// The state of a pending `update_fee`, mirroring the HTLC state machine where applicable.
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
96
/// The reason an inbound HTLC is being removed, determining which removal message
/// (fail, fail-malformed, or fulfill) we send to our counterparty.
enum InboundHTLCRemovalReason {
	/// Fail the HTLC backwards with an encrypted onion error packet.
	FailRelay(msgs::OnionErrorPacket),
	/// Fail the HTLC as malformed; carries a 32-byte hash and a failure code
	/// (presumably the onion hash and BOLT 4 failure code — confirm against the
	/// `update_fail_malformed_htlc` sender).
	FailMalformed(([u8; 32], u16)),
	/// Claim the HTLC with the given payment preimage.
	Fulfill(PaymentPreimage),
}
102
/// The state of an inbound HTLC as it progresses through the commitment-update dance
/// with our counterparty.
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	/// The HTLC is an irrevocable part of the current channel state on both sides and may now
	/// be forwarded and/or removed.
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
156
/// An HTLC offered to us by our counterparty, tracked through the states of
/// [`InboundHTLCState`].
struct InboundHTLCOutput {
	// The counterparty-assigned id for this HTLC, as used on the wire.
	htlc_id: u64,
	// The HTLC value, in msat.
	amount_msat: u64,
	// The absolute block height at which this HTLC expires.
	cltv_expiry: u32,
	// The payment hash whose preimage is required to claim this HTLC.
	payment_hash: PaymentHash,
	// Where this HTLC currently sits in the commitment-update state machine.
	state: InboundHTLCState,
}
164
/// The state of an outbound HTLC as it progresses through the commitment-update dance
/// with our counterparty.
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	/// The HTLC is an irrevocable part of the current channel state on both sides.
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
196
/// The final outcome — claimed or failed — of an outbound HTLC being removed.
#[derive(Clone)]
enum OutboundHTLCOutcome {
	/// The HTLC was fulfilled by the counterparty.
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	/// The HTLC was failed back with the given reason.
	Failure(HTLCFailReason),
}
203
204 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
205         fn from(o: Option<HTLCFailReason>) -> Self {
206                 match o {
207                         None => OutboundHTLCOutcome::Success(None),
208                         Some(r) => OutboundHTLCOutcome::Failure(r)
209                 }
210         }
211 }
212
213 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
214         fn into(self) -> Option<&'a HTLCFailReason> {
215                 match self {
216                         OutboundHTLCOutcome::Success(_) => None,
217                         OutboundHTLCOutcome::Failure(ref r) => Some(r)
218                 }
219         }
220 }
221
/// An HTLC we have offered to our counterparty, tracked through the states of
/// [`OutboundHTLCState`].
struct OutboundHTLCOutput {
	// The id we assigned to this HTLC, as used on the wire.
	htlc_id: u64,
	// The HTLC value, in msat.
	amount_msat: u64,
	// The absolute block height at which this HTLC expires.
	cltv_expiry: u32,
	// The payment hash whose preimage is required to claim this HTLC.
	payment_hash: PaymentHash,
	// Where this HTLC currently sits in the commitment-update state machine.
	state: OutboundHTLCState,
	// Where this HTLC came from, for failing/claiming backwards.
	source: HTLCSource,
	// The extra fee we're skimming off the top of this HTLC, if any.
	skimmed_fee_msat: Option<u64>,
}
231
/// An update held in the holding cell until our counterparty revokes their previous state.
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	/// A held `update_add_htlc`.
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	/// A held `update_fulfill_htlc` claiming an inbound HTLC with its preimage.
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	/// A held `update_fail_htlc` failing an inbound HTLC backwards.
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
253
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
///
/// All discriminants are distinct powers of two, written uniformly as `1 << n` since the
/// values are combined/tested as bitflags (see the `*_MASK`/`*_FLAGS` consts below).
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 1 << 2,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 1 << 3,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	/// Both sides consider the funding transaction confirmed and the channel is fully operational.
	ChannelReady = 1 << 6,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 1 << 12,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
/// Mask covering the flags set once either side has sent a `shutdown` message.
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
/// Flags which may be layered on top of more than one base state (see `ChannelState`).
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
/// Mask of every flag bit in `ChannelState`, i.e. all bits which are not base states.
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
324
/// The first commitment number, counting down from 2^48 - 1 (commitment numbers
/// are a 48-bit counter, see BOLT 3).
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// The default maximum number of HTLCs we will allow to be pending at once.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
328
329 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
330         const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
331         const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
332         if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
333 }
334
/// The weight added to a commitment transaction by a single (non-dust) HTLC output.
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
/// The weight added to a commitment transaction by a single (non-dust) HTLC output.
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

/// The value of each anchor output on an anchor-outputs commitment transaction, in satoshis.
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
377
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	/// An error which should be ignored (beyond logging) rather than acted upon.
	Ignore(String),
	/// An error which warrants sending a warning message to our peer.
	Warn(String),
	/// An error severe enough that the channel should be closed.
	Close(String),
}
386
387 impl fmt::Debug for ChannelError {
388         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
389                 match self {
390                         &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
391                         &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
392                         &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
393                 }
394         }
395 }
396
397 impl fmt::Display for ChannelError {
398         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
399                 match self {
400                         &ChannelError::Ignore(ref e) => write!(f, "{}", e),
401                         &ChannelError::Warn(ref e) => write!(f, "{}", e),
402                         &ChannelError::Close(ref e) => write!(f, "{}", e),
403                 }
404         }
405 }
406
/// Unwraps a secp256k1 `Result`, early-returning `Err(ChannelError::Close($err))`
/// from the enclosing function on failure.
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
415
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	/// The `u8` appears to count how long we've been staged — presumably timer ticks; confirm
	/// against `timer_tick_occurred`.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	/// The `u8` appears to count how long we've been staged — presumably timer ticks; confirm
	/// against `timer_tick_occurred`.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
432
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
452
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	/// We offered the HTLC (it is outbound from our perspective).
	LocalOffered,
	/// The counterparty offered the HTLC (it is inbound from our perspective).
	RemoteOffered,
}
458
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	// Number of pending HTLCs.
	pending_htlcs: u32,
	// Total value of pending HTLCs, in msat.
	pending_htlcs_value_msat: u64,
	// Dust exposure these HTLCs contribute on the counterparty's commitment tx, in msat.
	on_counterparty_tx_dust_exposure_msat: u64,
	// Dust exposure these HTLCs contribute on our own commitment tx, in msat.
	on_holder_tx_dust_exposure_msat: u64,
	// Total value sitting in the holding cell, in msat.
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
468
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
480
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	// The value of the hypothetical HTLC, in msat.
	amount_msat: u64,
	// Which side would be offering the HTLC.
	origin: HTLCInitiator,
}
486
487 impl HTLCCandidate {
488         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
489                 Self {
490                         amount_msat,
491                         origin,
492                 }
493         }
494 }
495
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	/// The HTLC fulfill is new; see `UpdateFulfillCommitFetch::NewClaim`.
	NewClaim {
		// The ChannelMonitorUpdate placing the new payment preimage in the channel monitor.
		monitor_update: ChannelMonitorUpdate,
		// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
		// The `update_fulfill_htlc` to send, if one could be generated immediately
		// (i.e. was not placed in the holding cell).
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	/// The HTLC fulfill is duplicative; see `UpdateFulfillCommitFetch::DuplicateClaim`.
	DuplicateClaim {},
}
506
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
522
/// The return value of `monitor_updating_restored`: the messages and actions which were blocked
/// on a pending `ChannelMonitor` update and can now proceed.
pub(super) struct MonitorRestoreUpdates {
	// A blocked revoke_and_ack to send, if any.
	pub raa: Option<msgs::RevokeAndACK>,
	// A blocked commitment update to send, if any.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	// The order in which the RAA and commitment update should be sent.
	pub order: RAACommitmentOrder,
	// HTLCs accepted while the monitor update was pending.
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	// HTLCs to fail backwards.
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	// HTLC claims whose forwarding can now be finalized.
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	// A funding transaction which can now be broadcast, if any.
	pub funding_broadcastable: Option<Transaction>,
	// A blocked channel_ready to send, if any.
	pub channel_ready: Option<msgs::ChannelReady>,
	// Blocked announcement signatures to send, if any.
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
535
/// The return value of `channel_reestablish`: the messages we need to (re-)send to get both
/// sides back in sync after a reconnection.
pub(super) struct ReestablishResponses {
	// A channel_ready to (re-)send, if any.
	pub channel_ready: Option<msgs::ChannelReady>,
	// A revoke_and_ack to (re-)send, if any.
	pub raa: Option<msgs::RevokeAndACK>,
	// A commitment update to (re-)send, if any.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	// The order in which the RAA and commitment update should be sent.
	pub order: RAACommitmentOrder,
	// Announcement signatures to (re-)send, if any.
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	// A shutdown message to (re-)send, if any.
	pub shutdown_msg: Option<msgs::Shutdown>,
}
545
/// The return type of `force_shutdown`
///
/// Contains a tuple with the following:
/// - An optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
/// - A list of HTLCs to fail back in the form of the (source, payment hash, and this channel's
/// counterparty_node_id and channel_id).
/// - An optional transaction id identifying a corresponding batch funding transaction.
pub(crate) type ShutdownResult = (
	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	Option<Txid>
);
558
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
// Public under test/fuzzing so test code can reference it; the value is identical either way.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
574
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2 (2016 blocks, i.e. roughly two weeks).
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
605
606 /// The number of ticks that may elapse while we're waiting for a response to a
607 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
608 /// them.
609 ///
610 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
611 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
612
613 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
614 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
615 /// exceeding this age limit will be force-closed and purged from memory.
616 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
617
618 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
619 pub(crate) const COINBASE_MATURITY: u32 = 100;
620
/// A [`ChannelMonitorUpdate`] which we have not yet handed to the `ChannelManager`; see
/// [`ChannelContext::blocked_monitor_updates`] where these are queued.
struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
}

// (De)serialize as a TLV stream so future fields can be added compatibly.
impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
});
628
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
        /// An outbound (locally-initiated) V1 channel which is not yet funded.
        UnfundedOutboundV1(OutboundV1Channel<SP>),
        /// An inbound (counterparty-initiated) V1 channel which is not yet funded.
        UnfundedInboundV1(InboundV1Channel<SP>),
        /// A channel which has reached the funded state.
        Funded(Channel<SP>),
}
636
637 impl<'a, SP: Deref> ChannelPhase<SP> where
638         SP::Target: SignerProvider,
639         <SP::Target as SignerProvider>::Signer: ChannelSigner,
640 {
641         pub fn context(&'a self) -> &'a ChannelContext<SP> {
642                 match self {
643                         ChannelPhase::Funded(chan) => &chan.context,
644                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
645                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
646                 }
647         }
648
649         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
650                 match self {
651                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
652                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
653                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
654                 }
655         }
656 }
657
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
        /// A counter tracking how many ticks have elapsed since this unfunded channel was
        /// created. If the peer has yet to fund the channel once this reaches
        /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
        ///
        /// This is so that we don't keep channels around that haven't progressed to a funded state
        /// in a timely manner.
        unfunded_channel_age_ticks: usize,
}
668
669 impl UnfundedChannelContext {
670         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
671         /// having reached the unfunded channel age limit.
672         ///
673         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
674         pub fn should_expire_unfunded_channel(&mut self) -> bool {
675                 self.unfunded_channel_age_ticks += 1;
676                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
677         }
678 }
679
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        /// The channel configuration currently applying to this channel (see also `prev_config`).
        config: LegacyChannelConfig,

        // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
        // constructed using it. The second element in the tuple corresponds to the number of ticks that
        // have elapsed since the update occurred.
        prev_config: Option<(ChannelConfig, usize)>,

        inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

        /// An opaque user-provided identifier, returned by [`Self::get_user_id`].
        user_id: u128,

        /// The current channel ID.
        channel_id: ChannelId,
        /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
        /// Will be `None` for channels created prior to 0.0.115.
        temporary_channel_id: Option<ChannelId>,
        /// Bitfield of `ChannelState` state and flag bits describing where in its lifecycle the
        /// channel currently is.
        channel_state: u32,

        // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
        // our peer. However, we want to make sure they received it, or else rebroadcast it when we
        // next connect.
        // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
        // Note that a number of our tests were written prior to the behavior here which retransmits
        // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
        // many tests.
        #[cfg(any(test, feature = "_test_utils"))]
        pub(crate) announcement_sigs_state: AnnouncementSigsState,
        #[cfg(not(any(test, feature = "_test_utils")))]
        announcement_sigs_state: AnnouncementSigsState,

        secp_ctx: Secp256k1<secp256k1::All>,
        channel_value_satoshis: u64,

        latest_monitor_update_id: u64,

        holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
        shutdown_scriptpubkey: Option<ShutdownScript>,
        destination_script: Script,

        // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
        // generation start at 0 and count up...this simplifies some parts of implementation at the
        // cost of others, but should really just be changed.

        cur_holder_commitment_transaction_number: u64,
        cur_counterparty_commitment_transaction_number: u64,
        value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
        pending_inbound_htlcs: Vec<InboundHTLCOutput>,
        pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
        holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

        /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
        /// need to ensure we resend them in the order we originally generated them. Note that because
        /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
        /// sufficient to simply set this to the opposite of any message we are generating as we
        /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
        /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
        /// send it first.
        resend_order: RAACommitmentOrder,

        monitor_pending_channel_ready: bool,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,

        // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
        // responsible for some of the HTLCs here or not - we don't know whether the update in question
        // completed or not. We currently ignore these fields entirely when force-closing a channel,
        // but need to handle this somehow or we run the risk of losing HTLCs!
        monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        monitor_pending_finalized_fulfills: Vec<HTLCSource>,

        /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
        /// but our signer (initially) refused to give us a signature, we should retry at some point in
        /// the future when the signer indicates it may have a signature for us.
        ///
        /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
        /// setting it again as a side-effect of [`Channel::channel_reestablish`].
        signer_pending_commitment_update: bool,

        // pending_update_fee is filled when sending and receiving update_fee.
        //
        // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
        // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
        // generating new commitment transactions with exactly the same criteria as inbound/outbound
        // HTLCs with similar state.
        pending_update_fee: Option<(u32, FeeUpdateState)>,
        // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
        // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
        // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
        // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
        // further `send_update_fee` calls, dropping the previous holding cell update entirely.
        holding_cell_update_fee: Option<u32>,
        next_holder_htlc_id: u64,
        next_counterparty_htlc_id: u64,
        feerate_per_kw: u32,

        /// The timestamp set on our latest `channel_update` message for this channel. It is updated
        /// when the channel is updated in ways which may impact the `channel_update` message or when a
        /// new block is received, ensuring it's always at least moderately close to the current real
        /// time.
        update_time_counter: u32,

        #[cfg(debug_assertions)]
        /// Max to_local and to_remote outputs in a locally-generated commitment transaction
        holder_max_commitment_tx_output: Mutex<(u64, u64)>,
        #[cfg(debug_assertions)]
        /// Max to_local and to_remote outputs in a remote-generated commitment transaction
        counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

        last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
        target_closing_feerate_sats_per_kw: Option<u32>,

        /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
        /// update, we need to delay processing it until later. We do that here by simply storing the
        /// closing_signed message and handling it in `maybe_propose_closing_signed`.
        pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

        /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
        /// transaction. These are set once we reach `closing_negotiation_ready`.
        #[cfg(test)]
        pub(crate) closing_fee_limits: Option<(u64, u64)>,
        #[cfg(not(test))]
        closing_fee_limits: Option<(u64, u64)>,

        /// The hash of the block in which the funding transaction was included.
        funding_tx_confirmed_in: Option<BlockHash>,
        funding_tx_confirmation_height: u32,
        short_channel_id: Option<u64>,
        /// Either the height at which this channel was created or the height at which it was last
        /// serialized if it was serialized by versions prior to 0.0.103.
        /// We use this to close if funding is never broadcasted.
        channel_creation_height: u32,

        counterparty_dust_limit_satoshis: u64,

        #[cfg(test)]
        pub(super) holder_dust_limit_satoshis: u64,
        #[cfg(not(test))]
        holder_dust_limit_satoshis: u64,

        #[cfg(test)]
        pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
        #[cfg(not(test))]
        counterparty_max_htlc_value_in_flight_msat: u64,

        #[cfg(test)]
        pub(super) holder_max_htlc_value_in_flight_msat: u64,
        #[cfg(not(test))]
        holder_max_htlc_value_in_flight_msat: u64,

        /// minimum channel reserve for self to maintain - set by them.
        counterparty_selected_channel_reserve_satoshis: Option<u64>,

        #[cfg(test)]
        pub(super) holder_selected_channel_reserve_satoshis: u64,
        #[cfg(not(test))]
        holder_selected_channel_reserve_satoshis: u64,

        counterparty_htlc_minimum_msat: u64,
        holder_htlc_minimum_msat: u64,
        #[cfg(test)]
        pub counterparty_max_accepted_htlcs: u16,
        #[cfg(not(test))]
        counterparty_max_accepted_htlcs: u16,
        holder_max_accepted_htlcs: u16,
        minimum_depth: Option<u32>,

        counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

        pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
        funding_transaction: Option<Transaction>,
        is_batch_funding: Option<()>,

        counterparty_cur_commitment_point: Option<PublicKey>,
        counterparty_prev_commitment_point: Option<PublicKey>,
        counterparty_node_id: PublicKey,

        counterparty_shutdown_scriptpubkey: Option<Script>,

        commitment_secrets: CounterpartyCommitmentSecrets,

        channel_update_status: ChannelUpdateStatus,
        /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
        /// not complete within a single timer tick (one minute), we should force-close the channel.
        /// This prevents us from keeping unusable channels around forever if our counterparty wishes
        /// to DoS us.
        /// Note that this field is reset to false on deserialization to give us a chance to connect to
        /// our peer and start the closing_signed negotiation fresh.
        closing_signed_in_flight: bool,

        /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
        /// This can be used to rebroadcast the channel_announcement message later.
        announcement_sigs: Option<(Signature, Signature)>,

        // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
        // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
        // be, by comparing the cached values to the fee of the transaction generated by
        // `build_commitment_transaction`.
        #[cfg(any(test, fuzzing))]
        next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
        #[cfg(any(test, fuzzing))]
        next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

        /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
        /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
        /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
        /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
        /// message until we receive a channel_reestablish.
        ///
        /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
        pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

        /// An option set when we wish to track how many ticks have elapsed while waiting for a response
        /// from our counterparty after sending a message. If the peer has yet to respond after reaching
        /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
        /// unblock the state machine.
        ///
        /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
        /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
        /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
        ///
        /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
        /// [`msgs::RevokeAndACK`] message from the counterparty.
        sent_message_awaiting_response: Option<usize>,

        #[cfg(any(test, fuzzing))]
        // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
        // corresponding HTLC on the inbound path. If, then, the outbound path channel is
        // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
        // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
        // is fine, but as a sanity check in our failure to generate the second claim, we check here
        // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
        historical_inbound_htlc_fulfills: HashSet<u64>,

        /// This channel's type, as negotiated during channel open
        channel_type: ChannelTypeFeatures,

        // Our counterparty can offer us SCID aliases which they will map to this channel when routing
        // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
        // the channel's funding UTXO.
        //
        // We also use this when sending our peer a channel_update that isn't to be broadcasted
        // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
        // associated channel mapping.
        //
        // We only bother storing the most recent SCID alias at any time, though our counterparty has
        // to store all of them.
        latest_inbound_scid_alias: Option<u64>,

        // We always offer our counterparty a static SCID alias, which we recognize as for this channel
        // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
        // don't currently support node id aliases and eventually privacy should be provided with
        // blinded paths instead of simple scid+node_id aliases.
        outbound_scid_alias: u64,

        // We track whether we already emitted a `ChannelPending` event.
        channel_pending_event_emitted: bool,

        // We track whether we already emitted a `ChannelReady` event.
        channel_ready_event_emitted: bool,

        /// The unique identifier used to re-derive the private key material for the channel through
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],

        /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
        /// store it here and only release it to the `ChannelManager` once it asks for it.
        blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
951
952 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// Returns the `update_time_counter` which timestamps our latest `channel_update` message.
        ///
        /// Allowed in any state (including after shutdown)
        pub fn get_update_time_counter(&self) -> u32 {
                self.update_time_counter
        }
957
        /// Returns the id of the latest [`ChannelMonitorUpdate`] generated for this channel.
        pub fn get_latest_monitor_update_id(&self) -> u64 {
                self.latest_monitor_update_id
        }
961
        /// Returns whether this channel is configured to be publicly announced.
        pub fn should_announce(&self) -> bool {
                self.config.announced_channel
        }
965
        /// Returns true if we are the channel initiator, i.e. the channel is outbound from us.
        pub fn is_outbound(&self) -> bool {
                self.channel_transaction_parameters.is_outbound_from_holder
        }
969
        /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
        /// Allowed in any state (including after shutdown)
        pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
                // Mirrors the `forwarding_fee_base_msat` set in our current channel config.
                self.config.options.forwarding_fee_base_msat
        }
975
        /// Returns true if we've ever received a message from the remote end for this Channel
        pub fn have_received_message(&self) -> bool {
                // Strip the flag bits (`STATE_FLAGS`) and check whether the remaining state ordering has
                // progressed past `OurInitSent` — which only happens once the peer has sent us something.
                self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
        }
980
981         /// Returns true if this channel is fully established and not known to be closing.
982         /// Allowed in any state (including after shutdown)
983         pub fn is_usable(&self) -> bool {
984                 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
985                 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
986         }
987
988         /// shutdown state returns the state of the channel in its various stages of shutdown
989         pub fn shutdown_state(&self) -> ChannelShutdownState {
990                 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
991                         return ChannelShutdownState::ShutdownComplete;
992                 }
993                 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 &&  self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
994                         return ChannelShutdownState::ShutdownInitiated;
995                 }
996                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
997                         return ChannelShutdownState::ResolvingHTLCs;
998                 }
999                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1000                         return ChannelShutdownState::NegotiatingClosingFee;
1001                 }
1002                 return ChannelShutdownState::NotShuttingDown;
1003         }
1004
1005         fn closing_negotiation_ready(&self) -> bool {
1006                 self.pending_inbound_htlcs.is_empty() &&
1007                 self.pending_outbound_htlcs.is_empty() &&
1008                 self.pending_update_fee.is_none() &&
1009                 self.channel_state &
1010                 (BOTH_SIDES_SHUTDOWN_MASK |
1011                         ChannelState::AwaitingRemoteRevoke as u32 |
1012                         ChannelState::PeerDisconnected as u32 |
1013                         ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1014         }
1015
1016         /// Returns true if this channel is currently available for use. This is a superset of
1017         /// is_usable() and considers things like the channel being temporarily disabled.
1018         /// Allowed in any state (including after shutdown)
1019         pub fn is_live(&self) -> bool {
1020                 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1021         }
1022
1023         // Public utilities:
1024
        /// Returns the current channel ID.
        pub fn channel_id(&self) -> ChannelId {
                self.channel_id
        }
1028
        /// Return the `temporary_channel_id` used during channel establishment.
        ///
        /// Will return `None` for channels created prior to LDK version 0.0.115.
        pub fn temporary_channel_id(&self) -> Option<ChannelId> {
                self.temporary_channel_id
        }
1035
        /// Returns the channel's `minimum_depth`, if set.
        pub fn minimum_depth(&self) -> Option<u32> {
                self.minimum_depth
        }
1039
        /// Gets the "user_id" value passed into the construction of this channel. It has no special
        /// meaning to LDK and exists only to allow users to have a persistent identifier of a channel.
        pub fn get_user_id(&self) -> u128 {
                self.user_id
        }
1045
        /// Gets the channel's type, i.e. the set of channel type features negotiated during channel
        /// open.
        pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
                &self.channel_type
        }
1050
        /// Gets the channel's `short_channel_id`.
        ///
        /// Will return `None` if the channel hasn't been confirmed yet.
        pub fn get_short_channel_id(&self) -> Option<u64> {
                self.short_channel_id
        }
1057
        /// Returns the most recent SCID alias our counterparty offered us for this channel, if any.
        ///
        /// Allowed in any state (including after shutdown)
        pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
                self.latest_inbound_scid_alias
        }
1062
        /// Returns the SCID alias we offered our counterparty for this channel.
        ///
        /// Allowed in any state (including after shutdown)
        pub fn outbound_scid_alias(&self) -> u64 {
                self.outbound_scid_alias
        }
1067
        /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
        /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
        /// or prior to any channel actions during `Channel` initialization.
        pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
                // Catch misuse (alias already set) in debug builds; no-op in release.
                debug_assert_eq!(self.outbound_scid_alias, 0);
                self.outbound_scid_alias = outbound_scid_alias;
        }
1075
        /// Returns the funding_txo we either got from our peer, or were given by
        /// `get_funding_created`.
        pub fn get_funding_txo(&self) -> Option<OutPoint> {
                self.channel_transaction_parameters.funding_outpoint
        }
1081
        /// Returns the block hash in which our funding transaction was confirmed, or `None` if it
        /// hasn't been confirmed.
        pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
                self.funding_tx_confirmed_in
        }
1086
1087         /// Returns the current number of confirmations on the funding transaction.
1088         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1089                 if self.funding_tx_confirmation_height == 0 {
1090                         // We either haven't seen any confirmation yet, or observed a reorg.
1091                         return 0;
1092                 }
1093
1094                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1095         }
1096
        /// Returns the contest delay which we (the holder) selected for this channel.
        fn get_holder_selected_contest_delay(&self) -> u16 {
                self.channel_transaction_parameters.holder_selected_contest_delay
        }
1100
        /// Returns our set of channel public keys, from the channel transaction parameters.
        fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
                &self.channel_transaction_parameters.holder_pubkeys
        }
1104
1105         pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1106                 self.channel_transaction_parameters.counterparty_parameters
1107                         .as_ref().map(|params| params.selected_contest_delay)
1108         }
1109
        /// Returns the counterparty's channel public keys.
        ///
        /// Panics (via the `unwrap`) if the counterparty's channel parameters have not yet been set.
        fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
                &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
        }
1113
        /// Returns our counterparty's node id.
        ///
        /// Allowed in any state (including after shutdown)
        pub fn get_counterparty_node_id(&self) -> PublicKey {
                self.counterparty_node_id
        }
1118
        /// Returns our `htlc_minimum_msat` for this channel.
        ///
        /// Allowed in any state (including after shutdown)
        pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
                self.holder_htlc_minimum_msat
        }
1123
        /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
        pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
                // Computed from our max-in-flight limit; see `get_htlc_maximum_msat`.
                self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
        }
1128
1129         /// Allowed in any state (including after shutdown)
1130         pub fn get_announced_htlc_max_msat(&self) -> u64 {
1131                 return cmp::min(
1132                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1133                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1134                         // channel might have been used to route very small values (either by honest users or as DoS).
1135                         self.channel_value_satoshis * 1000 * 9 / 10,
1136
1137                         self.counterparty_max_htlc_value_in_flight_msat
1138                 );
1139         }
1140
        /// Returns the counterparty's `htlc_minimum_msat` for this channel.
        ///
        /// Allowed in any state (including after shutdown)
        pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
                self.counterparty_htlc_minimum_msat
        }
1145
        /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
        pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
                // Computed from the counterparty's max-in-flight limit; see `get_htlc_maximum_msat`.
                self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
        }
1150
1151         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1152                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1153                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1154                         cmp::min(
1155                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1156                                 party_max_htlc_value_in_flight_msat
1157                         )
1158                 })
1159         }
1160
	/// Returns the total value of the channel, in satoshis.
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}
1164
	/// Returns the configured proportional forwarding fee for this channel, in millionths of
	/// the forwarded amount.
	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}
1168
1169         pub fn get_cltv_expiry_delta(&self) -> u16 {
1170                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1171         }
1172
1173         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1174                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1175         where F::Target: FeeEstimator
1176         {
1177                 match self.config.options.max_dust_htlc_exposure {
1178                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1179                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1180                                         ConfirmationTarget::OnChainSweep);
1181                                 feerate_per_kw as u64 * multiplier
1182                         },
1183                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1184                 }
1185         }
1186
1187         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1188         pub fn prev_config(&self) -> Option<ChannelConfig> {
1189                 self.prev_config.map(|prev_config| prev_config.0)
1190         }
1191
1192         // Checks whether we should emit a `ChannelPending` event.
1193         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1194                 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1195         }
1196
	// Returns whether we already emitted a `ChannelPending` event. Set via
	// `set_channel_pending_event_emitted`.
	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
		self.channel_pending_event_emitted
	}
1201
	// Remembers that we already emitted a `ChannelPending` event. Once set,
	// `should_emit_channel_pending_event` returns false.
	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
		self.channel_pending_event_emitted = true;
	}
1206
1207         // Checks whether we should emit a `ChannelReady` event.
1208         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1209                 self.is_usable() && !self.channel_ready_event_emitted
1210         }
1211
	// Remembers that we already emitted a `ChannelReady` event. Once set,
	// `should_emit_channel_ready_event` returns false.
	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
		self.channel_ready_event_emitted = true;
	}
1216
1217         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1218         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1219         /// no longer be considered when forwarding HTLCs.
1220         pub fn maybe_expire_prev_config(&mut self) {
1221                 if self.prev_config.is_none() {
1222                         return;
1223                 }
1224                 let prev_config = self.prev_config.as_mut().unwrap();
1225                 prev_config.1 += 1;
1226                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1227                         self.prev_config = None;
1228                 }
1229         }
1230
	/// Returns the current [`ChannelConfig`] applied to the channel.
	///
	/// Note this returns a copy; use [`Self::update_config`] to change it.
	pub fn config(&self) -> ChannelConfig {
		self.config.options
	}
1235
1236         /// Updates the channel's config. A bool is returned indicating whether the config update
1237         /// applied resulted in a new ChannelUpdate message.
1238         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1239                 let did_channel_update =
1240                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1241                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1242                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1243                 if did_channel_update {
1244                         self.prev_config = Some((self.config.options, 0));
1245                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1246                         // policy change to propagate throughout the network.
1247                         self.update_time_counter += 1;
1248                 }
1249                 self.config.options = *config;
1250                 did_channel_update
1251         }
1252
1253         /// Returns true if funding_signed was sent/received and the
1254         /// funding transaction has been broadcast if necessary.
1255         pub fn is_funding_broadcast(&self) -> bool {
1256                 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1257                         self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1258         }
1259
	/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
	/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
	/// the transaction. Thus, b will generally be sending a signature over such a transaction to
	/// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to a).
	/// @local is used only to convert relevant internal structures which refer to remote vs local
	/// to decide value of outputs and direction of HTLCs.
	/// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
	/// state may indicate that one peer has informed the other that they'd like to add an HTLC but
	/// have not yet committed it. Such HTLCs will only be included in transactions which are being
	/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
	/// which peer generated this transaction and "to whom" this transaction flows.
	#[inline]
	fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
		where L::Target: Logger
	{
		// HTLCs which don't meet the broadcaster's dust limit (plus second-stage HTLC tx fee on
		// non-anchors channels) get no output of their own; they are tracked separately here.
		let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
		let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
		let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);

		let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
		let mut remote_htlc_total_msat = 0;
		let mut local_htlc_total_msat = 0;
		let mut value_to_self_msat_offset = 0;

		// A pending update_fee overrides the committed feerate iff it would be included in this
		// transaction, per the same criteria used for pending HTLC inclusion below.
		let mut feerate_per_kw = self.feerate_per_kw;
		if let Some((feerate, update_state)) = self.pending_update_fee {
			if match update_state {
				// Note that these match the inclusion criteria when scanning
				// pending_inbound_htlcs below.
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
			} {
				feerate_per_kw = feerate;
			}
		}

		log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
			commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
			get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
			&self.channel_id,
			if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);

		// Builds an HTLCOutputInCommitment with no output index yet; indices are assigned by
		// CommitmentTransaction::new_with_auxiliary_htlc_data below.
		macro_rules! get_htlc_in_commitment {
			($htlc: expr, $offered: expr) => {
				HTLCOutputInCommitment {
					offered: $offered,
					amount_msat: $htlc.amount_msat,
					cltv_expiry: $htlc.cltv_expiry,
					payment_hash: $htlc.payment_hash,
					transaction_output_index: None
				}
			}
		}

		// Routes an HTLC into the non-dust or dust set depending on whether its value covers the
		// broadcaster's dust limit plus the fee of the relevant second-stage HTLC transaction
		// (timeout tx for offered HTLCs, success tx for received ones; zero on anchors channels).
		macro_rules! add_htlc_output {
			($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
				if $outbound == local { // "offered HTLC output"
					let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				} else {
					let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				}
			}
		}

		for ref htlc in self.pending_inbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
				InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
				InboundHTLCState::Committed => (true, "Committed"),
				InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
			};

			if include {
				add_htlc_output!(htlc, false, None, state_name);
				remote_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match &htlc.state {
					&InboundHTLCState::LocalRemoved(ref reason) => {
						// An inbound HTLC we've fulfilled but which isn't included in this
						// transaction pays its value to us rather than back to the remote.
						if generated_by_local {
							if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
								value_to_self_msat_offset += htlc.amount_msat as i64;
							}
						}
					},
					_ => {},
				}
			}
		}

		// Preimages for all outbound HTLCs which the counterparty has claimed, returned to the
		// caller in the CommitmentStats below.
		let mut preimages: Vec<PaymentPreimage> = Vec::new();

		for ref htlc in self.pending_outbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
				OutboundHTLCState::Committed => (true, "Committed"),
				OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
			};

			let preimage_opt = match htlc.state {
				OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
				_ => None,
			};

			if let Some(preimage) = preimage_opt {
				preimages.push(preimage);
			}

			if include {
				add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
				local_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match htlc.state {
					// An outbound HTLC the counterparty successfully claimed but which isn't
					// included here moves its value from us to them.
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
						value_to_self_msat_offset -= htlc.amount_msat as i64;
					},
					OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
						if !generated_by_local {
							value_to_self_msat_offset -= htlc.amount_msat as i64;
						}
					},
					_ => {},
				}
			}
		}

		// Our balance: the committed balance less all pending outbound HTLCs which made it into
		// this transaction, adjusted by the fulfill/claim offsets accumulated above.
		let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
		assert!(value_to_self_msat >= 0);
		// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
		// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
		// "violate" their reserve value by counting those against it. Thus, we have to convert
		// everything to i64 before subtracting as otherwise we can overflow.
		let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
		assert!(value_to_remote_msat >= 0);

		#[cfg(debug_assertions)]
		{
			// Make sure that the to_self/to_remote is always either past the appropriate
			// channel_reserve *or* it is making progress towards it.
			let mut broadcaster_max_commitment_tx_output = if generated_by_local {
				self.holder_max_commitment_tx_output.lock().unwrap()
			} else {
				self.counterparty_max_commitment_tx_output.lock().unwrap()
			};
			debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
			broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
			debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
			broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
		}

		let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
		let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
		// The channel funder (the outbound side) pays the commitment transaction fee and, on
		// anchors channels, the value of both anchor outputs.
		let (value_to_self, value_to_remote) = if self.is_outbound() {
			(value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
		} else {
			(value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
		};

		let mut value_to_a = if local { value_to_self } else { value_to_remote };
		let mut value_to_b = if local { value_to_remote } else { value_to_self };
		let (funding_pubkey_a, funding_pubkey_b) = if local {
			(self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
		} else {
			(self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
		};

		// Balance outputs below the broadcaster's dust limit are dropped entirely.
		if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
		} else {
			value_to_a = 0;
		}

		if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
		} else {
			value_to_b = 0;
		}

		let num_nondust_htlcs = included_non_dust_htlcs.len();

		let channel_parameters =
			if local { self.channel_transaction_parameters.as_holder_broadcastable() }
			else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
		let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
		                                                             value_to_a as u64,
		                                                             value_to_b as u64,
		                                                             funding_pubkey_a,
		                                                             funding_pubkey_b,
		                                                             keys.clone(),
		                                                             feerate_per_kw,
		                                                             &mut included_non_dust_htlcs,
		                                                             &channel_parameters
		);
		let mut htlcs_included = included_non_dust_htlcs;
		// The unwrap is safe, because all non-dust HTLCs have been assigned an output index
		htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
		htlcs_included.append(&mut included_dust_htlcs);

		// For the stats, trimmed-to-0 the value in msats accordingly
		// NOTE(review): this multiplies an msat value by 1000 before comparing against a
		// satoshi-denominated dust limit, whereas the sat-based trim of value_to_a/value_to_b
		// above divides by 1000 — confirm the `* 1000` here is intended.
		value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
		value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };

		CommitmentStats {
			tx,
			feerate_per_kw,
			total_fee_sat,
			num_nondust_htlcs,
			htlcs_included,
			local_balance_msat: value_to_self_msat as u64,
			remote_balance_msat: value_to_remote_msat as u64,
			preimages
		}
	}
1506
1507         #[inline]
1508         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1509         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1510         /// our counterparty!)
1511         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1512         /// TODO Some magic rust shit to compile-time check this?
1513         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1514                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1515                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1516                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1517                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1518
1519                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1520         }
1521
1522         #[inline]
1523         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1524         /// will sign and send to our counterparty.
1525         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1526         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1527                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1528                 //may see payments to it!
1529                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1530                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1531                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1532
1533                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1534         }
1535
1536         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1537         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1538         /// Panics if called before accept_channel/InboundV1Channel::new
1539         pub fn get_funding_redeemscript(&self) -> Script {
1540                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1541         }
1542
	/// Returns a reference to the counterparty's funding pubkey, as used when constructing the
	/// funding redeemscript.
	fn counterparty_funding_pubkey(&self) -> &PublicKey {
		&self.get_counterparty_pubkeys().funding_pubkey
	}
1546
	/// Returns the channel's current feerate, in satoshis per 1000 weight units.
	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
		self.feerate_per_kw
	}
1550
1551         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1552                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1553                 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1554                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1555                 // more dust balance if the feerate increases when we have several HTLCs pending
1556                 // which are near the dust limit.
1557                 let mut feerate_per_kw = self.feerate_per_kw;
1558                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1559                 // potential feerate updates coming soon.
1560                 if let Some((feerate, _)) = self.pending_update_fee {
1561                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1562                 }
1563                 if let Some(feerate) = outbound_feerate_update {
1564                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1565                 }
1566                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1567         }
1568
1569         /// Get forwarding information for the counterparty.
1570         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1571                 self.counterparty_forwarding_info.clone()
1572         }
1573
1574         /// Returns a HTLCStats about inbound pending htlcs
1575         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1576                 let context = self;
1577                 let mut stats = HTLCStats {
1578                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1579                         pending_htlcs_value_msat: 0,
1580                         on_counterparty_tx_dust_exposure_msat: 0,
1581                         on_holder_tx_dust_exposure_msat: 0,
1582                         holding_cell_msat: 0,
1583                         on_holder_tx_holding_cell_htlcs_count: 0,
1584                 };
1585
1586                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1587                         (0, 0)
1588                 } else {
1589                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1590                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1591                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1592                 };
1593                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1594                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1595                 for ref htlc in context.pending_inbound_htlcs.iter() {
1596                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1597                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1598                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1599                         }
1600                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1601                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1602                         }
1603                 }
1604                 stats
1605         }
1606
1607         /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1608         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1609                 let context = self;
1610                 let mut stats = HTLCStats {
1611                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1612                         pending_htlcs_value_msat: 0,
1613                         on_counterparty_tx_dust_exposure_msat: 0,
1614                         on_holder_tx_dust_exposure_msat: 0,
1615                         holding_cell_msat: 0,
1616                         on_holder_tx_holding_cell_htlcs_count: 0,
1617                 };
1618
1619                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1620                         (0, 0)
1621                 } else {
1622                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1623                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1624                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1625                 };
1626                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1627                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1628                 for ref htlc in context.pending_outbound_htlcs.iter() {
1629                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1630                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1631                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1632                         }
1633                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1634                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1635                         }
1636                 }
1637
1638                 for update in context.holding_cell_htlc_updates.iter() {
1639                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1640                                 stats.pending_htlcs += 1;
1641                                 stats.pending_htlcs_value_msat += amount_msat;
1642                                 stats.holding_cell_msat += amount_msat;
1643                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1644                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1645                                 }
1646                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1647                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1648                                 } else {
1649                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1650                                 }
1651                         }
1652                 }
1653                 stats
1654         }
1655
	/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
	/// Doesn't bother handling the
	/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
	/// corner case properly.
	pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
	-> AvailableBalances
	where F::Target: FeeEstimator
	{
		let context = &self;
		// Note that we have to handle overflow due to the above case.
		let inbound_stats = context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = context.get_outbound_pending_htlc_stats(None);

		// Our reported balance: what's ours, plus inbound HTLCs we've already claimed
		// (LocalRemoved/Fulfill) but which aren't yet irrevocably committed, minus everything
		// currently in flight outbound.
		let mut balance_msat = context.value_to_self_msat;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
				balance_msat += htlc.amount_msat;
			}
		}
		balance_msat -= outbound_stats.pending_htlcs_value_msat;

		// Raw spendable capacity: our balance, less in-flight outbound HTLCs, less the reserve
		// the counterparty requires of us (an as-yet-unknown reserve counts as zero).
		let outbound_capacity_msat = context.value_to_self_msat
				.saturating_sub(outbound_stats.pending_htlcs_value_msat)
				.saturating_sub(
					context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);

		let mut available_capacity_msat = outbound_capacity_msat;

		// Anchor channels carry two fixed-value anchor outputs which the funder pays for.
		let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if context.is_outbound() {
			// We should mind channel commit tx fee when computing how much of the available capacity
			// can be used in the next htlc. Mirrors the logic in send_htlc.
			//
			// The fee depends on whether the amount we will be sending is above dust or not,
			// and the answer will in turn change the amount itself — making it a circular
			// dependency.
			// This complicates the computation around dust-values, up to the one-htlc-value.
			let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
			}

			// Predict our next-commitment fee both ways: candidate HTLC just above dust (adds
			// an output) and just below dust (trimmed), each with a fee-spike-buffer HTLC.
			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
			let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
			let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
			let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				// Non-anchor channels reserve headroom for feerate spikes on top of the
				// predicted fee.
				max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
				min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}

			// We will first subtract the fee as if we were above-dust. Then, if the resulting
			// value ends up being below dust, we have this fee available again. In that case,
			// match the value to right-below-dust.
			let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
				max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
			if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
				let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
				debug_assert!(one_htlc_difference_msat != 0);
				capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
				capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
				available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
			} else {
				available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
			}
		} else {
			// If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
			// sending a new HTLC won't reduce their balance below our reserve threshold.
			let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
			}

			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
			let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);

			let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
			let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
				.saturating_sub(inbound_stats.pending_htlcs_value_msat);

			if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
				// If another HTLC's fee would reduce the remote's balance below the reserve limit
				// we've selected for them, we can only send dust HTLCs.
				available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
			}
		}

		let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;

		// If we get close to our maximum dust exposure, we end up in a situation where we can send
		// between zero and the remaining dust exposure limit remaining OR above the dust limit.
		// Because we cannot express this as a simple min/max, we prefer to tell the user they can
		// send above the dust limit (as the router can always overpay to meet the dust limit).
		let mut remaining_msat_below_dust_exposure_limit = None;
		let mut dust_exposure_dust_limit_msat = 0;
		let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);

		// Dust thresholds a new outbound HTLC must clear on each commitment tx; anchor
		// channels add no second-stage fee, so only the raw dust limits apply there.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
		} else {
			let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
			(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			 context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// Would one more maximally-sized dust HTLC push us past the configured exposure limit
		// on the counterparty's commitment?
		let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
			remaining_msat_below_dust_exposure_limit =
				Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
		}

		// ...and on our own commitment?
		let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
			remaining_msat_below_dust_exposure_limit = Some(cmp::min(
				remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
				max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
		}

		if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
			if available_capacity_msat < dust_exposure_dust_limit_msat {
				// We can only send dust-sized HTLCs anyway, so cap at the remaining dust budget.
				available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
			} else {
				// We can still send a non-dust HTLC; require at least a non-dust amount.
				next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
			}
		}

		available_capacity_msat = cmp::min(available_capacity_msat,
			context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);

		// If we'd exceed the counterparty's accepted-HTLC count cap, we cannot add anything.
		if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
			available_capacity_msat = 0;
		}

		AvailableBalances {
			inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
					- context.value_to_self_msat as i64
					- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
					- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
				0) as u64,
			outbound_capacity_msat,
			next_outbound_htlc_limit_msat: available_capacity_msat,
			next_outbound_htlc_minimum_msat,
			balance_msat,
		}
	}
1806
1807         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1808                 let context = &self;
1809                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1810         }
1811
	/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
	/// number of pending HTLCs that are on track to be in our next commitment tx.
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Only the funder pays commitment fees, and this computes the fee on *our* commitment,
		// so we must be the outbound (funding) side.
		assert!(context.is_outbound());

		// Second-stage HTLC tx fees; zero on anchor channels where HTLC txs can be zero-fee.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// On our commitment, inbound HTLCs are claimed via HTLC-success and outbound ones via
		// HTLC-timeout, hence the two different effective dust limits.
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;

		// Count the candidate HTLC (only if non-dust) plus the optional spike-buffer HTLC on
		// top of the HTLCs already in flight.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			}
		}

		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
				continue
			}
			// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
			// transaction including this HTLC if it times out before they RAA.
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
				continue
			}
			match htlc.state {
				OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
				// transaction won't be generated until they send us their next RAA, which will mean
				// dropping any HTLCs in this state.
				_ => {},
			}
		}

		// Holding-cell adds will appear in our next commitment once freed, so count the
		// non-dust ones as well.
		for htlc in context.holding_cell_htlc_updates.iter() {
			match htlc {
				&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
					if amount_msat / 1000 < real_dust_limit_timeout_sat {
						continue
					}
					included_htlcs += 1
				},
				_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
					 // ack we're guaranteed to never include them in commitment txs anymore.
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// In test/fuzzing builds, cache the predicted fee (without the spike-buffer HTLC) so
		// later commitment construction can cross-check the prediction.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
				+ context.holding_cell_htlc_updates.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				// Record the htlc-id counters as they would be after adding the candidate, so
				// the cached prediction can be matched against the eventual commitment.
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1915
	/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
	/// pending HTLCs that are on track to be in their next commitment tx
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// The counterparty pays commitment fees here, so they must be the funder (we're inbound).
		assert!(!context.is_outbound());

		// Second-stage HTLC tx fees; zero on anchor channels where HTLC txs can be zero-fee.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// On *their* commitment the roles flip: our outbound HTLCs are their HTLC-success
		// claims, our inbound ones their HTLC-timeout claims, against their dust limit.
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;

		// Count the candidate HTLC (only if non-dust) plus the optional spike-buffer HTLC on
		// top of the HTLCs already in flight.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			}
		}

		// When calculating the set of HTLCs which will be included in their next commitment_signed, all
		// non-dust inbound HTLCs are included (as all states imply it will be included) and only
		// committed outbound HTLCs, see below.
		// NOTE(review): these dust checks use `<=` where next_local_commit_tx_fee_msat uses
		// `<`; this one-sat difference looks deliberate (erring towards counting an HTLC as
		// dust on their tx) but should be confirmed before relying on it.
		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
				continue
			}
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
				continue
			}
			// We only include outbound HTLCs if it will not be included in their next commitment_signed,
			// i.e. if they've responded to us with an RAA after announcement.
			// NOTE(review): the comment above looks stale — the match below also counts
			// LocalAnnounced HTLCs; verify against upstream history before tightening it.
			match htlc.state {
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
				_ => {},
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// In test/fuzzing builds, cache the predicted fee (without the spike-buffer HTLC) so
		// later commitment construction can cross-check the prediction.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				// Record the htlc-id counters as they would be after adding the candidate, so
				// the cached prediction can be matched against the eventual commitment.
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
2005
2006         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2007                 where F: Fn() -> Option<O> {
2008                 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2009                    self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2010                         f()
2011                 } else {
2012                         None
2013                 }
2014         }
2015
2016         /// Returns the transaction if there is a pending funding transaction that is yet to be
2017         /// broadcast.
2018         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2019                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2020         }
2021
2022         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2023         /// broadcast.
2024         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2025                 self.if_unbroadcasted_funding(||
2026                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2027                 )
2028         }
2029
2030         /// Returns whether the channel is funded in a batch.
2031         pub fn is_batch_funding(&self) -> bool {
2032                 self.is_batch_funding.is_some()
2033         }
2034
2035         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2036         /// broadcast.
2037         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2038                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2039         }
2040
	/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
	/// shutdown of this channel - no more calls into this Channel may be made afterwards except
	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
	/// immediately (others we will have to allow to time out).
	pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
		// Note that we MUST only generate a monitor update that indicates force-closure - we're
		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, its likely any monitor events we generate will
		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
		// return them to fail the payment.
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
		let counterparty_node_id = self.get_counterparty_node_id();
		for htlc_update in self.holding_cell_htlc_updates.drain(..) {
			match htlc_update {
				HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
					dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
				},
				// Non-add holding-cell updates have nothing to fail backwards; they're simply
				// dropped along with the cell.
				_ => {}
			}
		}
		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
			// returning a channel monitor update here would imply a channel monitor update before
			// we even registered the channel monitor to begin with, which is invalid.
			// Thus, if we aren't actually at a point where we could conceivably broadcast the
			// funding transaction, don't return a funding txo (which prevents providing the
			// monitor update to the user, even if we return one).
			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
				// Force-closure is terminal, so pin the monitor update id to the closed-channel
				// sentinel value.
				self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
				Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
					update_id: self.latest_monitor_update_id,
					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
				}))
			} else { None }
		} else { None };
		// Must be snapshotted before we flip channel_state below, as the answer is derived from
		// the pre-shutdown funding state (see if_unbroadcasted_funding).
		let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();

		self.channel_state = ChannelState::ShutdownComplete as u32;
		self.update_time_counter += 1;
		// Tuple shape presumably matches the `ShutdownResult` alias — TODO confirm at its
		// declaration site.
		(monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid)
	}
2087 }
2088
2089 // Internal utility functions for channels
2090
2091 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2092 /// `channel_value_satoshis` in msat, set through
2093 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2094 ///
2095 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2096 ///
2097 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2098 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2099         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2100                 1
2101         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2102                 100
2103         } else {
2104                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2105         };
2106         channel_value_satoshis * 10 * configured_percent
2107 }
2108
2109 /// Returns a minimum channel reserve value the remote needs to maintain,
2110 /// required by us according to the configured or default
2111 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2112 ///
2113 /// Guaranteed to return a value no larger than channel_value_satoshis
2114 ///
2115 /// This is used both for outbound and inbound channels and has lower bound
2116 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2117 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2118         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2119         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2120 }
2121
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value, floored at 1000 sats and capped at the full channel value.
	// Plain integer division replaces the previous `overflowing_div` -- dividing a u64 by a
	// nonzero constant can never overflow, so the overflow flag was always discarded anyway.
	let one_percent = channel_value_satoshis / 100;
	cmp::min(channel_value_satoshis, cmp::max(one_percent, 1000))
}
2130
2131 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2132 // Note that num_htlcs should not include dust HTLCs.
2133 #[inline]
2134 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2135         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2136 }
2137
2138 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2139 // Note that num_htlcs should not include dust HTLCs.
2140 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2141         // Note that we need to divide before multiplying to round properly,
2142         // since the lowest denomination of bitcoin on-chain is the satoshi.
2143         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2144 }
2145
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All channel state lives in the context; this wrapper layers funded-channel operations on top.
	pub context: ChannelContext<SP>,
}
2151
// Cached data about a previously-built commitment transaction, available only in test/fuzzing
// builds. NOTE(review): the call sites are outside this chunk -- presumably used to
// cross-check fee computations against a freshly-built transaction; confirm against usage.
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Commitment transaction fee, in satoshis.
	fee: u64,
	// Number of pending HTLCs included when the transaction was built.
	total_pending_htlcs: usize,
	// Next holder-offered HTLC id at the time of caching.
	next_holder_htlc_id: u64,
	// Next counterparty-offered HTLC id at the time of caching.
	next_counterparty_htlc_id: u64,
	// Feerate (sat per 1000 weight) the fee was computed at.
	feerate: u32,
}
2160
2161 impl<SP: Deref> Channel<SP> where
2162         SP::Target: SignerProvider,
2163         <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
2164 {
2165         fn check_remote_fee<F: Deref, L: Deref>(
2166                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2167                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2168         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2169         {
2170                 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2171                 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2172                 // We generally don't care too much if they set the feerate to something very high, but it
2173                 // could result in the channel being useless due to everything being dust. This doesn't
2174                 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2175                 // zero fee, so their fee is no longer considered to determine dust limits.
2176                 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
2177                         let upper_limit =
2178                                 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
2179                         if feerate_per_kw as u64 > upper_limit {
2180                                 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2181                         }
2182                 }
2183
2184                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2185                         ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2186                 } else {
2187                         ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2188                 };
2189                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2190                 if feerate_per_kw < lower_limit {
2191                         if let Some(cur_feerate) = cur_feerate_per_kw {
2192                                 if feerate_per_kw > cur_feerate {
2193                                         log_warn!(logger,
2194                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2195                                                 cur_feerate, feerate_per_kw);
2196                                         return Ok(());
2197                                 }
2198                         }
2199                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2200                 }
2201                 Ok(())
2202         }
2203
2204         #[inline]
2205         fn get_closing_scriptpubkey(&self) -> Script {
2206                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2207                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2208                 // outside of those situations will fail.
2209                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2210         }
2211
2212         #[inline]
2213         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2214                 let mut ret =
2215                 (4 +                                                   // version
2216                  1 +                                                   // input count
2217                  36 +                                                  // prevout
2218                  1 +                                                   // script length (0)
2219                  4 +                                                   // sequence
2220                  1 +                                                   // output count
2221                  4                                                     // lock time
2222                  )*4 +                                                 // * 4 for non-witness parts
2223                 2 +                                                    // witness marker and flag
2224                 1 +                                                    // witness element count
2225                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2226                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2227                 2*(1 + 71);                                            // two signatures + sighash type flags
2228                 if let Some(spk) = a_scriptpubkey {
2229                         ret += ((8+1) +                                    // output values and script length
2230                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2231                 }
2232                 if let Some(spk) = b_scriptpubkey {
2233                         ret += ((8+1) +                                    // output values and script length
2234                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2235                 }
2236                 ret
2237         }
2238
	/// Builds the mutual-close transaction splitting the channel balance between holder and
	/// counterparty, with `proposed_total_fee_satoshis` deducted from the funder's output.
	/// Returns the transaction and the actual total fee paid, which can exceed the proposal
	/// when the funder's balance cannot cover it.
	///
	/// May only be called once all HTLCs are resolved and no fee update is pending.
	#[inline]
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		// The funder (the outbound side) pays the closing fee out of their balance.
		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		// If the fee exceeds the funder's balance, track the shortfall by bumping the
		// reported total fee (the funder's output will end up dust/omitted below).
		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		// Outputs at or below our dust limit are dropped entirely (their value goes to fees).
		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		// Both shutdown scripts must have been negotiated before building a closing tx.
		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
2273
2274         fn funding_outpoint(&self) -> OutPoint {
2275                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2276         }
2277
2278         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2279         /// entirely.
2280         ///
2281         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2282         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2283         ///
2284         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2285         /// disconnected).
2286         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2287                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2288         where L::Target: Logger {
2289                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2290                 // (see equivalent if condition there).
2291                 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2292                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2293                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2294                 self.context.latest_monitor_update_id = mon_update_id;
2295                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2296                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2297                 }
2298         }
2299
	/// Marks an inbound HTLC as fulfilled with the given preimage, returning a
	/// [`ChannelMonitorUpdate`] carrying the preimage and, when we can send it right away, an
	/// `update_fulfill_htlc` message for the counterparty.
	///
	/// If the channel is quiescent (awaiting a remote revoke, disconnected, or paused on a
	/// pending monitor update) the claim is queued in the holding cell and `msg` is `None`.
	/// Repeated claims of the same HTLC return [`UpdateFulfillFetch::DuplicateClaim`].
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted in an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// either.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		// Locate the inbound HTLC being claimed and sanity-check its state.
		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				// The provided preimage must hash to the payment hash we accepted the HTLC with.
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		// Channel is fully operational: transition the HTLC to LocalRemoved and return a
		// fulfill message alongside the monitor update.
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
2421
	/// Fulfills an inbound HTLC and, when we were able to generate an `update_fulfill_htlc`
	/// message, immediately builds a new commitment so both the preimage and the commitment
	/// land in a single [`ChannelMonitorUpdate`].
	///
	/// If monitor updates are currently blocked, the preimage update is instead numbered ahead
	/// of the blocked queue (shifting the queued updates' ids) so it can be applied regardless.
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					// Take over the id of the first blocked update (if any) so the preimage
					// update goes first, then shift each blocked update's id up by one to keep
					// the sequence contiguous.
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
2459
2460         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2461         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2462         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2463         /// before we fail backwards.
2464         ///
2465         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2466         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2467         /// [`ChannelError::Ignore`].
2468         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2469         -> Result<(), ChannelError> where L::Target: Logger {
2470                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2471                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2472         }
2473
2474         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2475         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2476         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2477         /// before we fail backwards.
2478         ///
2479         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2480         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2481         /// [`ChannelError::Ignore`].
2482         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2483         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2484                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2485                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2486                 }
2487                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2488
2489                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2490                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2491                 // these, but for now we just have to treat them as normal.
2492
2493                 let mut pending_idx = core::usize::MAX;
2494                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2495                         if htlc.htlc_id == htlc_id_arg {
2496                                 match htlc.state {
2497                                         InboundHTLCState::Committed => {},
2498                                         InboundHTLCState::LocalRemoved(ref reason) => {
2499                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2500                                                 } else {
2501                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2502                                                 }
2503                                                 return Ok(None);
2504                                         },
2505                                         _ => {
2506                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2507                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2508                                         }
2509                                 }
2510                                 pending_idx = idx;
2511                         }
2512                 }
2513                 if pending_idx == core::usize::MAX {
2514                         #[cfg(any(test, fuzzing))]
2515                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2516                         // is simply a duplicate fail, not previously failed and we failed-back too early.
2517                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2518                         return Ok(None);
2519                 }
2520
2521                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2522                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2523                         force_holding_cell = true;
2524                 }
2525
2526                 // Now update local state:
2527                 if force_holding_cell {
2528                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2529                                 match pending_update {
2530                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2531                                                 if htlc_id_arg == htlc_id {
2532                                                         #[cfg(any(test, fuzzing))]
2533                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2534                                                         return Ok(None);
2535                                                 }
2536                                         },
2537                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2538                                                 if htlc_id_arg == htlc_id {
2539                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2540                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2541                                                 }
2542                                         },
2543                                         _ => {}
2544                                 }
2545                         }
2546                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2547                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2548                                 htlc_id: htlc_id_arg,
2549                                 err_packet,
2550                         });
2551                         return Ok(None);
2552                 }
2553
2554                 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2555                 {
2556                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2557                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2558                 }
2559
2560                 Ok(Some(msgs::UpdateFailHTLC {
2561                         channel_id: self.context.channel_id(),
2562                         htlc_id: htlc_id_arg,
2563                         reason: err_packet
2564                 }))
2565         }
2566
2567         // Message handlers:
2568
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	///
	/// On success returns the newly-constructed [`ChannelMonitor`], which the caller must persist
	/// and register with the chain monitor before the funding transaction is broadcast.
	pub fn funding_signed<L: Deref>(
		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
	where
		L::Target: Logger
	{
		// Only the funder (outbound side) receives funding_signed; the fundee is the one sending it.
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
		}
		// We must be exactly in FundingCreated, modulo the MonitorUpdateInProgress flag (which, per
		// the assert further down, cannot actually be set yet as we have no monitor).
		if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
		}
		// Commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER and no secrets
		// may have been seen yet - anything else indicates we advanced state out of order.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		// Rebuild the initial counterparty commitment transaction so we can seed the new
		// ChannelMonitor with it below.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		// Build our own initial commitment transaction and verify the counterparty's signature on it.
		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
			}
		}

		// No HTLCs can exist at this point, hence the empty counterparty-HTLC-signature Vec.
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give the signer a chance to reject the commitment (e.g. an external signer's policy check).
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;


		// Construct the ChannelMonitor with its own freshly-derived signer instance and hand it the
		// initial holder commitment tx so it can watch the chain from here on.
		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		                                          shutdown_script, self.context.get_holder_selected_contest_delay(),
		                                          &self.context.destination_script, (funding_txo, funding_txo_script),
		                                          &self.context.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);

		// Also give the monitor the counterparty's initial commitment tx (no HTLCs yet).
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
		// In a batch funding flow the funding tx must not be broadcast until every channel in the
		// batch is ready; WaitingForBatch records that we are still waiting on the others.
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
		} else {
			self.context.channel_state = ChannelState::FundingSent as u32;
		}
		// Consume the initial commitment number on both sides (numbers count down).
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let need_channel_ready = self.check_get_channel_ready(0).is_some();
		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok(channel_monitor)
	}
2661
2662         /// Updates the state of the channel to indicate that all channels in the batch have received
2663         /// funding_signed and persisted their monitors.
2664         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2665         /// treated as a non-batch channel going forward.
2666         pub fn set_batch_ready(&mut self) {
2667                 self.context.is_batch_funding = None;
2668                 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2669         }
2670
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// lnd (see the linked issue) may send channel_ready before channel_reestablish on
		// reconnection; stash the message so it can be replayed after the reestablish is handled.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		debug_assert!(
			non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
			non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
		);
		if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
			// First channel_ready and we haven't sent ours yet: just record theirs.
			self.context.channel_state |= ChannelState::TheirChannelReady as u32;
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We had already sent our channel_ready: both sides are now ready, so the channel is.
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
		} else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
			// If we reconnected before sending our `channel_ready` they may still resend theirs:
			(self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
			                      (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
		{
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			// Either way the per-commitment point they send must match the one from their original
			// channel_ready; re-derive what it should be based on how far state has advanced.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
							&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
						).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			// Duplicate channel_ready: nothing further to do, state was not advanced.
			return Ok(None);
		} else {
			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
		}

		// Rotate in the new per-commitment point they provided for their next commitment tx.
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
2748
	/// Handles an incoming update_add_htlc message, validating it against our channel limits
	/// (amount bounds, HTLC count, max in-flight value, dust exposure, fees and reserves) before
	/// appending it to `pending_inbound_htlcs`.
	///
	/// `create_pending_htlc_status` lets us downgrade `pending_forward_status` to a failure with a
	/// given BOLT 4 error code for conditions where we accept the HTLC but intend to fail it back,
	/// rather than closing the channel.
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		// We can't accept HTLCs sent after we've sent a shutdown.
		let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if local_sent_shutdown {
			// 0x4000|8 is BOLT 4's PERM|8 (permanent_channel_failure).
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if remote_sent_shutdown {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		// Basic amount sanity: within the channel's total value, non-zero, and over our minimum.
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		// Enforce the limits we advertised: max accepted HTLC count and max in-flight value.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		// Compute per-direction dust thresholds; with anchors-zero-fee-htlc-tx the HTLC txs pay no
		// fee, so no fee-based buffer is added to the dust limits.
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		// If the HTLC would be dust on the counterparty's commitment tx, make sure accepting it
		// keeps our total dust exposure there under the configured limit; otherwise accept-then-fail
		// it with 0x1000|7 (BOLT 4 UPDATE|7, temporary_channel_failure).
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// Same dust-exposure check on our own commitment transaction.
		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// The remote's remaining balance after all pending inbound HTLCs (minus fulfilled outbound
		// ones about to be removed, per the reserve discussion above).
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			// The two anchor outputs are paid for by the commitment-tx funder (the non-outbound
			// check: on inbound channels that's the remote).
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}

		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so we need to account for them.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}
		// HTLC IDs must be strictly sequential per BOLT 2.
		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		// Locktime values >= 500,000,000 are interpreted as unix timestamps, not block heights.
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
2918
2919         /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
2920         #[inline]
2921         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
2922                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
2923                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
2924                         if htlc.htlc_id == htlc_id {
2925                                 let outcome = match check_preimage {
2926                                         None => fail_reason.into(),
2927                                         Some(payment_preimage) => {
2928                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
2929                                                 if payment_hash != htlc.payment_hash {
2930                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
2931                                                 }
2932                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
2933                                         }
2934                                 };
2935                                 match htlc.state {
2936                                         OutboundHTLCState::LocalAnnounced(_) =>
2937                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
2938                                         OutboundHTLCState::Committed => {
2939                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
2940                                         },
2941                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
2942                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
2943                                 }
2944                                 return Ok(htlc);
2945                         }
2946                 }
2947                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
2948         }
2949
2950         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2951                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2952                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2953                 }
2954                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2955                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
2956                 }
2957
2958                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
2959         }
2960
2961         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2962                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2963                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
2964                 }
2965                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2966                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
2967                 }
2968
2969                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2970                 Ok(())
2971         }
2972
2973         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2974                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2975                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
2976                 }
2977                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2978                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
2979                 }
2980
2981                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2982                 Ok(())
2983         }
2984
	/// Handles an incoming `commitment_signed` message: verifies the counterparty's signature
	/// over our next holder commitment transaction and over each non-dust HTLC transaction,
	/// advances pending HTLC/fee-update states, and returns a `ChannelMonitorUpdate` persisting
	/// the newly-signed commitment (or queues it if a monitor update is already in flight).
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		// Reject the message unless the channel is operational, the peer is connected, and we
		// haven't already begun exchanging closing_signed messages.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		// Rebuild our next holder commitment transaction locally and check the counterparty's
		// signature against it before touching any state.
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		// In test/fuzzing builds, cross-check the fee we projected when the commitment was
		// proposed against the fee of the commitment we just built, if the channel state still
		// matches the snapshot taken when the projection was cached.
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
						}
				}
			}
		}

		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		// Verify the counterparty's signature on each non-dust HTLC transaction, pairing HTLCs
		// with their signatures (and sources) for inclusion in the monitor update.
		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				// Anchor channels sign HTLC txs with SINGLE|ANYONECANPAY so fees can be attached later.
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give the signer a chance to reject the commitment (e.g. an external signer's policy).
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		// Inbound HTLCs announced by the remote are now committed; advance them towards being
		// forwardable once the remote revokes.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		// Outbound HTLCs the remote removed are now committed-removed; collect any preimages so
		// the monitor learns about claims.
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim anyway.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		// Build the monitor update persisting the newly-signed holder commitment.
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}]
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
3205
3206         /// Public version of the below, checking relevant preconditions first.
3207         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3208         /// returns `(None, Vec::new())`.
3209         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3210                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3211         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3212         where F::Target: FeeEstimator, L::Target: Logger
3213         {
3214                 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3215                    (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3216                         self.free_holding_cell_htlcs(fee_estimator, logger)
3217                 } else { (None, Vec::new()) }
3218         }
3219
3220         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3221         /// for our counterparty.
3222         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3223                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3224         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3225         where F::Target: FeeEstimator, L::Target: Logger
3226         {
3227                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3228                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3229                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3230                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3231
3232                         let mut monitor_update = ChannelMonitorUpdate {
3233                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3234                                 updates: Vec::new(),
3235                         };
3236
3237                         let mut htlc_updates = Vec::new();
3238                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3239                         let mut update_add_count = 0;
3240                         let mut update_fulfill_count = 0;
3241                         let mut update_fail_count = 0;
3242                         let mut htlcs_to_fail = Vec::new();
3243                         for htlc_update in htlc_updates.drain(..) {
3244                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
3245                                 // fee races with adding too many outputs which push our total payments just over
3246                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
3247                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3248                                 // to rebalance channels.
3249                                 match &htlc_update {
3250                                         &HTLCUpdateAwaitingACK::AddHTLC {
3251                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3252                                                 skimmed_fee_msat, ..
3253                                         } => {
3254                                                 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3255                                                         onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3256                                                 {
3257                                                         Ok(_) => update_add_count += 1,
3258                                                         Err(e) => {
3259                                                                 match e {
3260                                                                         ChannelError::Ignore(ref msg) => {
3261                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3262                                                                                 // If we fail to send here, then this HTLC should
3263                                                                                 // be failed backwards. Failing to send here
3264                                                                                 // indicates that this HTLC may keep being put back
3265                                                                                 // into the holding cell without ever being
3266                                                                                 // successfully forwarded/failed/fulfilled, causing
3267                                                                                 // our counterparty to eventually close on us.
3268                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3269                                                                         },
3270                                                                         _ => {
3271                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3272                                                                         },
3273                                                                 }
3274                                                         }
3275                                                 }
3276                                         },
3277                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3278                                                 // If an HTLC claim was previously added to the holding cell (via
3279                                                 // `get_update_fulfill_htlc`, then generating the claim message itself must
3280                                                 // not fail - any in between attempts to claim the HTLC will have resulted
3281                                                 // in it hitting the holding cell again and we cannot change the state of a
3282                                                 // holding cell HTLC from fulfill to anything else.
3283                                                 let mut additional_monitor_update =
3284                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3285                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3286                                                         { monitor_update } else { unreachable!() };
3287                                                 update_fulfill_count += 1;
3288                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3289                                         },
3290                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3291                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3292                                                         Ok(update_fail_msg_option) => {
3293                                                                 // If an HTLC failure was previously added to the holding cell (via
3294                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3295                                                                 // not fail - we should never end up in a state where we double-fail
3296                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3297                                                                 // for a full revocation before failing.
3298                                                                 debug_assert!(update_fail_msg_option.is_some());
3299                                                                 update_fail_count += 1;
3300                                                         },
3301                                                         Err(e) => {
3302                                                                 if let ChannelError::Ignore(_) = e {}
3303                                                                 else {
3304                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3305                                                                 }
3306                                                         }
3307                                                 }
3308                                         },
3309                                 }
3310                         }
3311                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3312                                 return (None, htlcs_to_fail);
3313                         }
3314                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3315                                 self.send_update_fee(feerate, false, fee_estimator, logger)
3316                         } else {
3317                                 None
3318                         };
3319
3320                         let mut additional_update = self.build_commitment_no_status_check(logger);
3321                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3322                         // but we want them to be strictly increasing by one, so reset it here.
3323                         self.context.latest_monitor_update_id = monitor_update.update_id;
3324                         monitor_update.updates.append(&mut additional_update.updates);
3325
3326                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3327                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3328                                 update_add_count, update_fulfill_count, update_fail_count);
3329
3330                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3331                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3332                 } else {
3333                         (None, Vec::new())
3334                 }
3335         }
3336
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	///
	/// Returns the HTLCs that must be failed back to their sources (e.g. holding-cell adds that
	/// could not be sent) and, if monitor updates are not being held back, the
	/// [`ChannelMonitorUpdate`] recording the revoked commitment secret (plus any new commitment
	/// data generated here).
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		// A revocation only makes sense on a fully-operational channel that isn't mid-reconnect
		// and hasn't progressed into the closing-fee negotiation.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		// The revealed secret must derive the commitment point the peer previously gave us for
		// the state being revoked; otherwise the revocation is bogus.
		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked claim). But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		#[cfg(any(test, fuzzing))]
		{
			// Invalidate the cached per-commitment fee info; the commitment numbers advance below.
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		// Give the signer a chance to reject the revocation (e.g. if it disagrees about the
		// commitment number or secret) before we commit any state changes.
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			}
		};

		// Store the revoked secret and queue it into a monitor update so the ChannelMonitor can
		// punish any broadcast of the now-revoked commitment.
		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.
		self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		// Set if any HTLC/fee state change below obligates us to send a new commitment_signed.
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;

			// We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
			// First pass: drop HTLCs fully resolved by this revocation...
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			// ...second pass: advance the state machine of the HTLCs that remain.
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					// Temporarily move the state out so we can match on it by value.
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		// A pending fee update may also be unblocked by this revocation.
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		// Decide whether the monitor update is handed back to the caller now or queued behind
		// earlier blocked updates (or an explicit caller hold).
		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call
				// get_last_commitment_update_for_send(), which does not update state, but we're
				// definitely now awaiting a remote revoke before we can step forward any more, so
				// set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so reset it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so reset it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so reset it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
3605
3606         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3607         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3608         /// commitment update.
3609         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3610                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3611         where F::Target: FeeEstimator, L::Target: Logger
3612         {
3613                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3614                 assert!(msg_opt.is_none(), "We forced holding cell?");
3615         }
3616
3617         /// Adds a pending update to this channel. See the doc for send_htlc for
3618         /// further details on the optionness of the return value.
3619         /// If our balance is too low to cover the cost of the next commitment transaction at the
3620         /// new feerate, the update is cancelled.
3621         ///
3622         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3623         /// [`Channel`] if `force_holding_cell` is false.
3624         fn send_update_fee<F: Deref, L: Deref>(
3625                 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3626                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3627         ) -> Option<msgs::UpdateFee>
3628         where F::Target: FeeEstimator, L::Target: Logger
3629         {
3630                 if !self.context.is_outbound() {
3631                         panic!("Cannot send fee from inbound channel");
3632                 }
3633                 if !self.context.is_usable() {
3634                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3635                 }
3636                 if !self.context.is_live() {
3637                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3638                 }
3639
3640                 // Before proposing a feerate update, check that we can actually afford the new fee.
3641                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3642                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3643                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3644                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3645                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3646                 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3647                 if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3648                         //TODO: auto-close after a number of failures?
3649                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3650                         return None;
3651                 }
3652
3653                 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3654                 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3655                 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3656                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3657                 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3658                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3659                         return None;
3660                 }
3661                 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3662                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3663                         return None;
3664                 }
3665
3666                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3667                         force_holding_cell = true;
3668                 }
3669
3670                 if force_holding_cell {
3671                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
3672                         return None;
3673                 }
3674
3675                 debug_assert!(self.context.pending_update_fee.is_none());
3676                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3677
3678                 Some(msgs::UpdateFee {
3679                         channel_id: self.context.channel_id,
3680                         feerate_per_kw,
3681                 })
3682         }
3683
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		// Callers must never invoke this on a channel that has already completed shutdown.
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		// Pre-funding channels have no state worth reestablishing; signal the caller to
		// force-shutdown (and forget) the channel instead.
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			return Err(());
		}

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		// If we'd previously sent (or committed to sending) announcement_signatures, roll back to
		// NotSent so they are regenerated and re-sent after reconnection.
		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		// Roll the counterparty HTLC id counter back over the HTLCs we just forgot, as the peer
		// will reuse those ids when it re-sends the corresponding update_add_htlcs.
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				// Like a RemoteAnnounced HTLC, an update_fee the peer sent but never committed to
				// is simply forgotten; they will re-send it after reconnecting. Only the funder
				// sends update_fee, so this state implies we are not outbound.
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		// Any in-flight response-expectation is void once the peer is gone.
		self.context.sent_message_awaiting_response = None;

		self.context.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
3762
3763         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3764         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3765         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3766         /// update completes (potentially immediately).
3767         /// The messages which were generated with the monitor update must *not* have been sent to the
3768         /// remote end, and must instead have been dropped. They will be regenerated when
3769         /// [`Self::monitor_updating_restored`] is called.
3770         ///
3771         /// [`ChannelManager`]: super::channelmanager::ChannelManager
3772         /// [`chain::Watch`]: crate::chain::Watch
3773         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3774         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3775                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3776                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3777                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3778         ) {
3779                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3780                 self.context.monitor_pending_commitment_signed |= resend_commitment;
3781                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3782                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3783                 self.context.monitor_pending_failures.append(&mut pending_fails);
3784                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3785                 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3786         }
3787
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		// We must currently be mid-update; clear the in-progress flag before rebuilding messages.
		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		// Drain all HTLC resolutions that were queued while the monitor update was pending;
		// they are returned to the caller for processing regardless of peer connectivity.
		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			// The peer is disconnected, so there is no one to send an RAA/commitment_signed to.
			// Drop the pending-resend flags; channel_reestablish handling will regenerate
			// whatever messages are needed on reconnection.
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		// Note that if regenerating the commitment update fails (e.g. the signer is not yet
		// ready), `.ok()` leaves `commitment_update` as None; the callee records the pending
		// signer state so the update can be produced later.
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
3872
	/// Handles an inbound `update_fee` message from our counterparty (who must be the channel
	/// funder), recording the new feerate as `RemoteAnnounced` pending the corresponding
	/// `commitment_signed`.
	///
	/// Returns a `ChannelError::Close` if the message is not allowed in the current state (we
	/// funded the channel, or the peer owes us a `channel_reestablish`), if the proposed feerate
	/// fails `check_remote_fee`, or if the feerate increase would push our dust exposure over
	/// the configured limit.
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// Only the funder may send update_fee; if this channel is outbound, we are the funder.
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
		// Snapshot whether the new feerate exceeds our dust buffer *before* recording it, as
		// `get_dust_buffer_feerate` takes any `pending_update_fee` into account.
		let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// If the feerate has increased over the previous dust buffer (note that
		// `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
		// won't be pushed over our dust exposure limit by the feerate increase.
		if feerate_over_dust_buffer {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
3907
3908         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
3909                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3910                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
3911                 msgs::RevokeAndACK {
3912                         channel_id: self.context.channel_id,
3913                         per_commitment_secret,
3914                         next_per_commitment_point,
3915                         #[cfg(taproot)]
3916                         next_local_nonce: None,
3917                 }
3918         }
3919
3920         /// Gets the last commitment update for immediate sending to our peer.
3921         fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
3922                 let mut update_add_htlcs = Vec::new();
3923                 let mut update_fulfill_htlcs = Vec::new();
3924                 let mut update_fail_htlcs = Vec::new();
3925                 let mut update_fail_malformed_htlcs = Vec::new();
3926
3927                 for htlc in self.context.pending_outbound_htlcs.iter() {
3928                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
3929                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
3930                                         channel_id: self.context.channel_id(),
3931                                         htlc_id: htlc.htlc_id,
3932                                         amount_msat: htlc.amount_msat,
3933                                         payment_hash: htlc.payment_hash,
3934                                         cltv_expiry: htlc.cltv_expiry,
3935                                         onion_routing_packet: (**onion_packet).clone(),
3936                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
3937                                 });
3938                         }
3939                 }
3940
3941                 for htlc in self.context.pending_inbound_htlcs.iter() {
3942                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3943                                 match reason {
3944                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
3945                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
3946                                                         channel_id: self.context.channel_id(),
3947                                                         htlc_id: htlc.htlc_id,
3948                                                         reason: err_packet.clone()
3949                                                 });
3950                                         },
3951                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
3952                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
3953                                                         channel_id: self.context.channel_id(),
3954                                                         htlc_id: htlc.htlc_id,
3955                                                         sha256_of_onion: sha256_of_onion.clone(),
3956                                                         failure_code: failure_code.clone(),
3957                                                 });
3958                                         },
3959                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
3960                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
3961                                                         channel_id: self.context.channel_id(),
3962                                                         htlc_id: htlc.htlc_id,
3963                                                         payment_preimage: payment_preimage.clone(),
3964                                                 });
3965                                         },
3966                                 }
3967                         }
3968                 }
3969
3970                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
3971                         Some(msgs::UpdateFee {
3972                                 channel_id: self.context.channel_id(),
3973                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
3974                         })
3975                 } else { None };
3976
3977                 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
3978                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
3979                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
3980                 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
3981                         self.context.signer_pending_commitment_update = false;
3982                         update
3983                 } else {
3984                         self.context.signer_pending_commitment_update = true;
3985                         return Err(());
3986                 };
3987                 Ok(msgs::CommitmentUpdate {
3988                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
3989                         commitment_signed,
3990                 })
3991         }
3992
3993         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
3994         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
3995                 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
3996                         assert!(self.context.shutdown_scriptpubkey.is_some());
3997                         Some(msgs::Shutdown {
3998                                 channel_id: self.context.channel_id,
3999                                 scriptpubkey: self.get_closing_scriptpubkey(),
4000                         })
4001                 } else { None }
4002         }
4003
4004         /// May panic if some calls other than message-handling calls (which will all Err immediately)
4005         /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4006         ///
4007         /// Some links printed in log lines are included here to check them during build (when run with
4008         /// `cargo doc --document-private-items`):
4009         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4010         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4011         pub fn channel_reestablish<L: Deref, NS: Deref>(
4012                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4013                 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4014         ) -> Result<ReestablishResponses, ChannelError>
4015         where
4016                 L::Target: Logger,
4017                 NS::Target: NodeSigner
4018         {
4019                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4020                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4021                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
4022                         // just close here instead of trying to recover.
4023                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4024                 }
4025
4026                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4027                         msg.next_local_commitment_number == 0 {
4028                         return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4029                 }
4030
4031                 if msg.next_remote_commitment_number > 0 {
4032                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4033                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4034                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4035                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4036                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4037                         }
4038                         if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4039                                 macro_rules! log_and_panic {
4040                                         ($err_msg: expr) => {
4041                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4042                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4043                                         }
4044                                 }
4045                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4046                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4047                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4048                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4049                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4050                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4051                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4052                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4053                         }
4054                 }
4055
4056                 // Before we change the state of the channel, we check if the peer is sending a very old
4057                 // commitment transaction number, if yes we send a warning message.
4058                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4059                 if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4060                         return Err(
4061                                 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4062                         );
4063                 }
4064
4065                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4066                 // remaining cases either succeed or ErrorMessage-fail).
4067                 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4068                 self.context.sent_message_awaiting_response = None;
4069
4070                 let shutdown_msg = self.get_outbound_shutdown();
4071
4072                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4073
4074                 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4075                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4076                         if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4077                                         self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4078                                 if msg.next_remote_commitment_number != 0 {
4079                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4080                                 }
4081                                 // Short circuit the whole handler as there is nothing we can resend them
4082                                 return Ok(ReestablishResponses {
4083                                         channel_ready: None,
4084                                         raa: None, commitment_update: None,
4085                                         order: RAACommitmentOrder::CommitmentFirst,
4086                                         shutdown_msg, announcement_sigs,
4087                                 });
4088                         }
4089
4090                         // We have OurChannelReady set!
4091                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4092                         return Ok(ReestablishResponses {
4093                                 channel_ready: Some(msgs::ChannelReady {
4094                                         channel_id: self.context.channel_id(),
4095                                         next_per_commitment_point,
4096                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4097                                 }),
4098                                 raa: None, commitment_update: None,
4099                                 order: RAACommitmentOrder::CommitmentFirst,
4100                                 shutdown_msg, announcement_sigs,
4101                         });
4102                 }
4103
4104                 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4105                         // Remote isn't waiting on any RevokeAndACK from us!
4106                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4107                         None
4108                 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4109                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4110                                 self.context.monitor_pending_revoke_and_ack = true;
4111                                 None
4112                         } else {
4113                                 Some(self.get_last_revoke_and_ack())
4114                         }
4115                 } else {
4116                         return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4117                 };
4118
4119                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4120                 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4121                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4122                 // the corresponding revoke_and_ack back yet.
4123                 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4124                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4125                         self.mark_awaiting_response();
4126                 }
4127                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4128
4129                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4130                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4131                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4132                         Some(msgs::ChannelReady {
4133                                 channel_id: self.context.channel_id(),
4134                                 next_per_commitment_point,
4135                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4136                         })
4137                 } else { None };
4138
4139                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4140                         if required_revoke.is_some() {
4141                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4142                         } else {
4143                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4144                         }
4145
4146                         Ok(ReestablishResponses {
4147                                 channel_ready, shutdown_msg, announcement_sigs,
4148                                 raa: required_revoke,
4149                                 commitment_update: None,
4150                                 order: self.context.resend_order.clone(),
4151                         })
4152                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4153                         if required_revoke.is_some() {
4154                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4155                         } else {
4156                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4157                         }
4158
4159                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4160                                 self.context.monitor_pending_commitment_signed = true;
4161                                 Ok(ReestablishResponses {
4162                                         channel_ready, shutdown_msg, announcement_sigs,
4163                                         commitment_update: None, raa: None,
4164                                         order: self.context.resend_order.clone(),
4165                                 })
4166                         } else {
4167                                 Ok(ReestablishResponses {
4168                                         channel_ready, shutdown_msg, announcement_sigs,
4169                                         raa: required_revoke,
4170                                         commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4171                                         order: self.context.resend_order.clone(),
4172                                 })
4173                         }
4174                 } else {
4175                         Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4176                 }
4177         }
4178
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	///
	/// The result is memoized in `self.context.closing_fee_limits`, so repeated calls within one
	/// closing_signed negotiation always return the same range.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		// Return the cached range if we already computed one for this negotiation.
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		// Only the funder pays the closing fee, so as the non-funder we place no upper bound of
		// our own (u32::MAX feerate); the cap below becomes the counterparty's full balance.
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		// If the user asked for a specific target feerate, raise both ends of the range to meet it
		// (as the non-funder, clamp by the current commitment feerate first).
		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one sides' balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funders' output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				// As the non-funder, the fee comes out of the counterparty's balance; cap it at
				// everything that isn't ours (rounding our msat balance up to whole sats).
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		// Cache the computed range so subsequent calls stay consistent.
		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
4231
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		// Thin delegation; the actual readiness checks live on the channel context.
		self.context.closing_negotiation_ready()
	}
4239
4240         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4241         /// an Err if no progress is being made and the channel should be force-closed instead.
4242         /// Should be called on a one-minute timer.
4243         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4244                 if self.closing_negotiation_ready() {
4245                         if self.context.closing_signed_in_flight {
4246                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4247                         } else {
4248                                 self.context.closing_signed_in_flight = true;
4249                         }
4250                 }
4251                 Ok(())
4252         }
4253
	/// Attempts to kick off (or continue) the closing_signed negotiation.
	///
	/// If we are the funder and negotiation is ready, builds and signs an initial
	/// `closing_signed` at our minimum acceptable fee, recording it in `last_sent_closing_fee`.
	/// If we are not the funder, instead processes any counterparty `closing_signed` that was
	/// queued while a monitor update was in progress.
	///
	/// Returns `(None, None)` when there is nothing to do (negotiation not ready, or we already
	/// sent a proposal). A returned `Transaction` (if any) is the fully-signed closing tx ready
	/// for broadcast.
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// Only propose once, and only once both shutdowns are exchanged and HTLCs are drained.
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None));
		}

		if !self.context.is_outbound() {
			// As the non-funder we never propose first; handle a counterparty proposal that was
			// deferred while a monitor update was pending, if there is one.
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		// Start the negotiation at our minimum fee; the fee_range below tells the peer how far
		// up we are willing to go.
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				// Signer failure here closes the channel; there is no retry path for
				// closing-transaction signatures at this stage.
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				// Remember what we proposed so closing_signed() can detect acceptance.
				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None))
			}
		}
	}
4296
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// a reconnection.
	fn mark_awaiting_response(&mut self) {
		// Some(0) = "waiting, zero timer ticks elapsed"; the counter is advanced by
		// should_disconnect_peer_awaiting_response() on each timer tick.
		self.context.sent_message_awaiting_response = Some(0);
	}
4303
4304         /// Determines whether we should disconnect the counterparty due to not receiving a response
4305         /// within our expected timeframe.
4306         ///
4307         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4308         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4309                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4310                         ticks_elapsed
4311                 } else {
4312                         // Don't disconnect when we're not waiting on a response.
4313                         return false;
4314                 };
4315                 *ticks_elapsed += 1;
4316                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4317         }
4318
	/// Handles a counterparty `shutdown` message.
	///
	/// Validates the message against our current state (connected, funded, no un-acked inbound
	/// HTLCs, BOLT2-compliant and consistent scriptpubkey), then sets `RemoteShutdownSent` (and
	/// `LocalShutdownSent` if we respond with our own `shutdown`).
	///
	/// Returns our `shutdown` reply (if we haven't sent one yet), a `ChannelMonitorUpdate`
	/// persisting our shutdown script (if we just generated one), and any holding-cell HTLC adds
	/// that we drop so the payments can be failed back.
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		// The peer must not send shutdown while any inbound HTLC it added is still un-committed
		// on our side (RemoteAnnounced = they sent update_add_htlc but no commitment yet).
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
		}

		// The counterparty may not change its shutdown script after having sent one (e.g. on a
		// re-sent shutdown after reconnect); remember the first one we see.
		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;

		// Lazily fetch our own shutdown script from the signer if we don't have one yet; this is
		// the last fallible step before we start mutating state below.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
		self.context.update_time_counter += 1;

		// Persist our (newly-generated) shutdown script to the monitor so it survives restarts.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
4415
4416         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4417                 let mut tx = closing_tx.trust().built_transaction().clone();
4418
4419                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4420
4421                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4422                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4423                 let mut holder_sig = sig.serialize_der().to_vec();
4424                 holder_sig.push(EcdsaSighashType::All as u8);
4425                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4426                 cp_sig.push(EcdsaSighashType::All as u8);
4427                 if funding_key[..] < counterparty_funding_key[..] {
4428                         tx.input[0].witness.push(holder_sig);
4429                         tx.input[0].witness.push(cp_sig);
4430                 } else {
4431                         tx.input[0].witness.push(cp_sig);
4432                         tx.input[0].witness.push(holder_sig);
4433                 }
4434
4435                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4436                 tx
4437         }
4438
4439         pub fn closing_signed<F: Deref>(
4440                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4441                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4442                 where F::Target: FeeEstimator
4443         {
4444                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4445                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4446                 }
4447                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4448                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4449                 }
4450                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4451                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4452                 }
4453                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4454                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4455                 }
4456
4457                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4458                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4459                 }
4460
4461                 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4462                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
4463                         return Ok((None, None));
4464                 }
4465
4466                 let funding_redeemscript = self.context.get_funding_redeemscript();
4467                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4468                 if used_total_fee != msg.fee_satoshis {
4469                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4470                 }
4471                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4472
4473                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4474                         Ok(_) => {},
4475                         Err(_e) => {
4476                                 // The remote end may have decided to revoke their output due to inconsistent dust
4477                                 // limits, so check for that case by re-checking the signature here.
4478                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4479                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4480                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4481                         },
4482                 };
4483
4484                 for outp in closing_tx.trust().built_transaction().output.iter() {
4485                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4486                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4487                         }
4488                 }
4489
4490                 assert!(self.context.shutdown_scriptpubkey.is_some());
4491                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4492                         if last_fee == msg.fee_satoshis {
4493                                 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4494                                 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4495                                 self.context.update_time_counter += 1;
4496                                 return Ok((None, Some(tx)));
4497                         }
4498                 }
4499
4500                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4501
4502                 macro_rules! propose_fee {
4503                         ($new_fee: expr) => {
4504                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4505                                         (closing_tx, $new_fee)
4506                                 } else {
4507                                         self.build_closing_transaction($new_fee, false)
4508                                 };
4509
4510                                 return match &self.context.holder_signer {
4511                                         ChannelSignerType::Ecdsa(ecdsa) => {
4512                                                 let sig = ecdsa
4513                                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4514                                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4515
4516                                                 let signed_tx = if $new_fee == msg.fee_satoshis {
4517                                                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
4518                                                         self.context.update_time_counter += 1;
4519                                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4520                                                         Some(tx)
4521                                                 } else { None };
4522
4523                                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4524                                                 Ok((Some(msgs::ClosingSigned {
4525                                                         channel_id: self.context.channel_id,
4526                                                         fee_satoshis: used_fee,
4527                                                         signature: sig,
4528                                                         fee_range: Some(msgs::ClosingSignedFeeRange {
4529                                                                 min_fee_satoshis: our_min_fee,
4530                                                                 max_fee_satoshis: our_max_fee,
4531                                                         }),
4532                                                 }), signed_tx))
4533                                         }
4534                                 }
4535                         }
4536                 }
4537
4538                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4539                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4540                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4541                         }
4542                         if max_fee_satoshis < our_min_fee {
4543                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4544                         }
4545                         if min_fee_satoshis > our_max_fee {
4546                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4547                         }
4548
4549                         if !self.context.is_outbound() {
4550                                 // They have to pay, so pick the highest fee in the overlapping range.
4551                                 // We should never set an upper bound aside from their full balance
4552                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4553                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4554                         } else {
4555                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4556                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4557                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
4558                                 }
4559                                 // The proposed fee is in our acceptable range, accept it and broadcast!
4560                                 propose_fee!(msg.fee_satoshis);
4561                         }
4562                 } else {
4563                         // Old fee style negotiation. We don't bother to enforce whether they are complying
4564                         // with the "making progress" requirements, we just comply and hope for the best.
4565                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4566                                 if msg.fee_satoshis > last_fee {
4567                                         if msg.fee_satoshis < our_max_fee {
4568                                                 propose_fee!(msg.fee_satoshis);
4569                                         } else if last_fee < our_max_fee {
4570                                                 propose_fee!(our_max_fee);
4571                                         } else {
4572                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4573                                         }
4574                                 } else {
4575                                         if msg.fee_satoshis > our_min_fee {
4576                                                 propose_fee!(msg.fee_satoshis);
4577                                         } else if last_fee > our_min_fee {
4578                                                 propose_fee!(our_min_fee);
4579                                         } else {
4580                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4581                                         }
4582                                 }
4583                         } else {
4584                                 if msg.fee_satoshis < our_min_fee {
4585                                         propose_fee!(our_min_fee);
4586                                 } else if msg.fee_satoshis > our_max_fee {
4587                                         propose_fee!(our_max_fee);
4588                                 } else {
4589                                         propose_fee!(msg.fee_satoshis);
4590                                 }
4591                         }
4592                 }
4593         }
4594
4595         fn internal_htlc_satisfies_config(
4596                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4597         ) -> Result<(), (&'static str, u16)> {
4598                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4599                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4600                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4601                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4602                         return Err((
4603                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4604                                 0x1000 | 12, // fee_insufficient
4605                         ));
4606                 }
4607                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4608                         return Err((
4609                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4610                                 0x1000 | 13, // incorrect_cltv_expiry
4611                         ));
4612                 }
4613                 Ok(())
4614         }
4615
4616         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4617         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4618         /// unsuccessful, falls back to the previous one if one exists.
4619         pub fn htlc_satisfies_config(
4620                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4621         ) -> Result<(), (&'static str, u16)> {
4622                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4623                         .or_else(|err| {
4624                                 if let Some(prev_config) = self.context.prev_config() {
4625                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4626                                 } else {
4627                                         Err(err)
4628                                 }
4629                         })
4630         }
4631
	/// Gets the current holder commitment transaction number, as seen by external callers.
	//
	// NOTE(review): commitment numbers count *down* from `INITIAL_COMMITMENT_NUMBER`, so the +1
	// presumably converts the internal "next commitment" counter into the number of the
	// currently-active holder commitment — confirm the convention against callers.
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}
4635
	/// Gets the current counterparty commitment transaction number, as seen by external callers.
	//
	// While the AwaitingRemoteRevoke flag is set the internal counter has already advanced for a
	// commitment we sent but they have not yet revoked the prior one for, so we back it out by
	// one here. NOTE(review): commitment numbers count down from `INITIAL_COMMITMENT_NUMBER`;
	// confirm the off-by-one conventions against callers.
	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
	}
4639
	/// Gets the number of the most recently revoked counterparty commitment transaction.
	//
	// NOTE(review): counter + 2, i.e. presumably one step behind the current counterparty
	// commitment (itself counter + 1) — confirm against callers.
	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
4643
	/// Returns a reference to the channel's holder signer, for test-only introspection.
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
		&self.context.holder_signer
	}
4648
4649         #[cfg(test)]
4650         pub fn get_value_stat(&self) -> ChannelValueStat {
4651                 ChannelValueStat {
4652                         value_to_self_msat: self.context.value_to_self_msat,
4653                         channel_value_msat: self.context.channel_value_satoshis * 1000,
4654                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4655                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4656                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4657                         holding_cell_outbound_amount_msat: {
4658                                 let mut res = 0;
4659                                 for h in self.context.holding_cell_htlc_updates.iter() {
4660                                         match h {
4661                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4662                                                         res += amount_msat;
4663                                                 }
4664                                                 _ => {}
4665                                         }
4666                                 }
4667                                 res
4668                         },
4669                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4670                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4671                 }
4672         }
4673
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		// Checks only the MonitorUpdateInProgress state flag; any other flags may also be set.
		(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
	}
4679
4680         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4681         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4682                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4683                 self.context.blocked_monitor_updates[0].update.update_id - 1
4684         }
4685
4686         /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4687         /// further blocked monitor update exists after the next.
4688         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4689                 if self.context.blocked_monitor_updates.is_empty() { return None; }
4690                 Some((self.context.blocked_monitor_updates.remove(0).update,
4691                         !self.context.blocked_monitor_updates.is_empty()))
4692         }
4693
4694         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4695         /// immediately given to the user for persisting or `None` if it should be held as blocked.
4696         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4697         -> Option<ChannelMonitorUpdate> {
4698                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4699                 if !release_monitor {
4700                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4701                                 update,
4702                         });
4703                         None
4704                 } else {
4705                         Some(update)
4706                 }
4707         }
4708
	/// Returns the number of [`ChannelMonitorUpdate`]s currently queued as blocked, i.e. not yet
	/// released for persistence.
	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
4712
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		// Mask out the flags which may legitimately be set alongside FundingSent while we still
		// await the initial monitor persistence, then compare against FundingSent exactly.
		if self.context.channel_state &
			!(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
				== ChannelState::FundingSent as u32 {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// FundingSent set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
4748
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		// True either while the OurChannelReady flag is set pre-ChannelReady, or once the channel
		// state (flags masked out) has progressed to ChannelReady or beyond.
		(self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
	}
4753
	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		// Set when we have received the counterparty's shutdown message.
		(self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
	}
4758
	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		// Set when we have sent our own shutdown message.
		(self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
	}
4763
4764         /// Returns true if this channel is fully shut down. True here implies that no further actions
4765         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4766         /// will be handled appropriately by the chain monitor.
4767         pub fn is_shutdown(&self) -> bool {
4768                 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
4769                         assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4770                         true
4771                 } else { false }
4772         }
4773
	/// Returns the channel's current [`ChannelUpdateStatus`].
	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}
4777
	/// Sets the channel's [`ChannelUpdateStatus`].
	//
	// NOTE(review): the update_time_counter bump presumably ensures a subsequently-generated
	// channel_update is seen as newer than prior ones — confirm against callers.
	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
4782
	/// Checks whether the funding transaction is sufficiently confirmed for us to send
	/// `channel_ready`, advancing the channel state flags as needed, and returns the
	/// [`msgs::ChannelReady`] to send to our peer, if any.
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			// Funding not yet confirmed and this isn't a 0-conf channel - nothing to do.
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			// Presumably a reorg took us back below the funding confirmation height; reset so a
			// later confirmation is re-recorded.
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
			// First time reaching depth: mark that our channel_ready is (about to be) sent.
			self.context.channel_state |= ChannelState::OurChannelReady as u32;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
			// Peer already sent their channel_ready; ours completes the handshake.
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state);
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
				// Peer disconnected: no message can be sent right now; fall through to None.
			} else {
				// A monitor update is in flight - remember to send channel_ready once it
				// completes rather than sending it now.
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
4845
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// (channel_ready, announcement_signatures) messages to hand back to the caller, if any.
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									// An input with an empty witness implies a non-segwit (malleable)
									// input, meaning the funding txid could change out from under us.
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							}
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				// Any confirmed transaction spending our funding output closes the channel
				// on-chain, so tell the caller we must be force-closed/cleaned up.
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
4926
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Thin wrapper around do_best_block_updated with the signer/config info populated;
		// other call sites presumably pass None when announcement signatures can't be built.
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}
4948
4949         fn do_best_block_updated<NS: Deref, L: Deref>(
4950                 &mut self, height: u32, highest_header_time: u32,
4951                 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
4952         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4953         where
4954                 NS::Target: NodeSigner,
4955                 L::Target: Logger
4956         {
4957                 let mut timed_out_htlcs = Vec::new();
4958                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
4959                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
4960                 // ~now.
4961                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
4962                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4963                         match htlc_update {
4964                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
4965                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
4966                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
4967                                                 false
4968                                         } else { true }
4969                                 },
4970                                 _ => true
4971                         }
4972                 });
4973
4974                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
4975
4976                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
4977                         let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
4978                                 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
4979                         } else { None };
4980                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
4981                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
4982                 }
4983
4984                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
4985                 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
4986                    (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
4987                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4988                         if self.context.funding_tx_confirmation_height == 0 {
4989                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
4990                                 // zero if it has been reorged out, however in either case, our state flags
4991                                 // indicate we've already sent a channel_ready
4992                                 funding_tx_confirmations = 0;
4993                         }
4994
4995                         // If we've sent channel_ready (or have both sent and received channel_ready), and
4996                         // the funding transaction has become unconfirmed,
4997                         // close the channel and hope we can get the latest state on chain (because presumably
4998                         // the funding transaction is at least still in the mempool of most nodes).
4999                         //
5000                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5001                         // 0-conf channel, but not doing so may lead to the
5002                         // `ChannelManager::short_to_chan_info` map  being inconsistent, so we currently have
5003                         // to.
5004                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5005                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5006                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5007                                 return Err(ClosureReason::ProcessingError { err: err_reason });
5008                         }
5009                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5010                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5011                         log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5012                         // If funding_tx_confirmed_in is unset, the channel must not be active
5013                         assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5014                         assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5015                         return Err(ClosureReason::FundingTimedOut);
5016                 }
5017
5018                 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5019                         self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5020                 } else { None };
5021                 Ok((None, timed_out_htlcs, announcement_sigs))
5022         }
5023
5024         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5025         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5026         /// before the channel has reached channel_ready and we can just wait for more blocks.
5027         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5028                 if self.context.funding_tx_confirmation_height != 0 {
5029                         // We handle the funding disconnection by calling best_block_updated with a height one
5030                         // below where our funding was connected, implying a reorg back to conf_height - 1.
5031                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
5032                         // We use the time field to bump the current time we set on channel updates if its
5033                         // larger. If we don't know that time has moved forward, we can just set it to the last
5034                         // time we saw and it will be ignored.
5035                         let best_time = self.context.update_time_counter;
5036                         match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5037                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5038                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5039                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5040                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5041                                         Ok(())
5042                                 },
5043                                 Err(e) => Err(e)
5044                         }
5045                 } else {
5046                         // We never learned about the funding confirmation anyway, just ignore
5047                         Ok(())
5048                 }
5049         }
5050
5051         // Methods to get unprompted messages to send to the remote end (or where we already returned
5052         // something in the handler for the message that prompted this message):
5053
5054         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5055         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5056         /// directions). Should be used for both broadcasted announcements and in response to an
5057         /// AnnouncementSignatures message from the remote peer.
5058         ///
5059         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5060         /// closing).
5061         ///
5062         /// This will only return ChannelError::Ignore upon failure.
5063         ///
5064         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5065         fn get_channel_announcement<NS: Deref>(
5066                 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5067         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5068                 if !self.context.config.announced_channel {
5069                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5070                 }
5071                 if !self.context.is_usable() {
5072                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5073                 }
5074
5075                 let short_channel_id = self.context.get_short_channel_id()
5076                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5077                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5078                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5079                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5080                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5081
5082                 let msg = msgs::UnsignedChannelAnnouncement {
5083                         features: channelmanager::provided_channel_features(&user_config),
5084                         chain_hash,
5085                         short_channel_id,
5086                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5087                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5088                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5089                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5090                         excess_data: Vec::new(),
5091                 };
5092
5093                 Ok(msg)
5094         }
5095
	/// Constructs an `announcement_signatures` message for this channel if it is ready to be
	/// announced, setting `announcement_sigs_state` to `MessageSent` on success.
	///
	/// Returns `None` (silently, or with a log entry) when the channel is insufficiently
	/// confirmed, not usable, the peer is disconnected, we've already sent the signatures, or
	/// either signing operation fails.
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Require the funding tx to be confirmed, with confirmation height no later than
		// best_block_height - 5 (i.e. at least six confirmations), before announcing.
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		// Only generate the signatures once - any state other than NotSent means we've already
		// sent (or are re-sending via another path) our announcement_signatures.
		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		// Sign with our node key first, then with our funding key via the channel signer.
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			// Currently only ECDSA channel signers exist, but match so new signer types must be handled.
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				// Only flip the state once everything that can fail has succeeded.
				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			}
		}
	}
5161
5162         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5163         /// available.
5164         fn sign_channel_announcement<NS: Deref>(
5165                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5166         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5167                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5168                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5169                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5170                         let were_node_one = announcement.node_id_1 == our_node_key;
5171
5172                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5173                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5174                         match &self.context.holder_signer {
5175                                 ChannelSignerType::Ecdsa(ecdsa) => {
5176                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5177                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5178                                         Ok(msgs::ChannelAnnouncement {
5179                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5180                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5181                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5182                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5183                                                 contents: announcement,
5184                                         })
5185                                 }
5186                         }
5187                 } else {
5188                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5189                 }
5190         }
5191
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	///
	/// Returns `ChannelError::Close` if either of the counterparty's signatures fails to
	/// verify, and `ChannelError::Ignore` if the funding is not yet sufficiently confirmed
	/// on our side (their signatures are still stored in that case).
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		// Both signatures commit to the double-SHA256 of the unsigned announcement encoding.
		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		// Check the counterparty's node-key signature, then their funding-key signature.
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				 &announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		// Store their (now-verified) signatures before the confirmation check below, so a later
		// call to `get_signed_channel_announcement` can still build the announcement once we've
		// seen enough confirmations ourselves.
		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		// Our peer may simply have seen a block we haven't; only an Ignore error here.
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
5222
5223         /// Gets a signed channel_announcement for this channel, if we previously received an
5224         /// announcement_signatures from our counterparty.
5225         pub fn get_signed_channel_announcement<NS: Deref>(
5226                 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5227         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5228                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5229                         return None;
5230                 }
5231                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5232                         Ok(res) => res,
5233                         Err(_) => return None,
5234                 };
5235                 match self.sign_channel_announcement(node_signer, announcement) {
5236                         Ok(res) => Some(res),
5237                         Err(_) => None,
5238                 }
5239         }
5240
	/// Builds the channel_reestablish message to send on reconnection.
	///
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		// We must currently be marked disconnected, and must have advanced past the initial
		// counterparty commitment (i.e. have exchanged at least the first commitment_signed).
		assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		// If the counterparty has revoked at least one commitment (their current number + 1 is
		// still below the initial number), include their last-revealed secret for data-loss
		// protection; otherwise send all-zeroes as required by the spec.
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0;32]
		};
		// NOTE(review): presumably this flags that we expect a channel_reestablish in response so
		// unresponsive peers can be detected — confirm against `mark_awaiting_response`.
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
5291
5292
5293         // Send stuff to our remote peers:
5294
5295         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5296         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5297         /// commitment update.
5298         ///
5299         /// `Err`s will only be [`ChannelError::Ignore`].
5300         pub fn queue_add_htlc<F: Deref, L: Deref>(
5301                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5302                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5303                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5304         ) -> Result<(), ChannelError>
5305         where F::Target: FeeEstimator, L::Target: Logger
5306         {
5307                 self
5308                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5309                                 skimmed_fee_msat, fee_estimator, logger)
5310                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5311                         .map_err(|err| {
5312                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5313                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5314                                 err
5315                         })
5316         }
5317
	/// Adds a pending outbound HTLC to this channel, note that you probably want
	/// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   awaiting ACK.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		// The channel must be fully established (ChannelReady) with neither side having sent
		// shutdown before we can add new HTLCs.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		// Sanity-bound the amount by the channel's total value (in msat).
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		// Enforce the current min/max limits for our next outbound HTLC, which account for
		// reserves, pending HTLCs, and fee buffers.
		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen, if we're !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		// If we're waiting on a revoke_and_ack or a monitor update we cannot commit to a new
		// state, so the HTLC must wait in the holding cell regardless of the caller's request.
		let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			// Queue the HTLC; it will be turned into an update_add_htlc message later by
			// maybe_free_holding_cell_htlcs. No message to return now.
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
			});
			return Ok(None);
		}

		// Track the HTLC as announced-but-unacked until the commitment dance completes.
		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
		};
		// Only consume the HTLC id once we've actually committed to using it above.
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
5421
	/// Promotes all HTLC and fee-update state that was waiting on this commitment and builds the
	/// [`ChannelMonitorUpdate`] carrying the new counterparty commitment transaction info.
	///
	/// This is infallible by design: once we've decided to send a new `commitment_signed`, the
	/// state transitions performed here are one-way, so callers must be committed to sending
	/// before invoking this.
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			// Two-step replace (build Option, then assign) because we can't move out of
			// `htlc.state` while borrowing `forward_info` from it.
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				// Only a counterparty-initiated (inbound) fee update can be in this state.
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		// If we disconnect before receiving the RAA, we must retransmit it first on reconnect.
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		// Box the HTLC sources so the monitor update owns its copies independent of `self`.
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		// Hand the new counterparty commitment tx data to the ChannelMonitor so it can react if
		// this (now-valid) commitment ends up on chain.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}]
		};
		// We now await the counterparty's revoke_and_ack for this commitment.
		self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
		monitor_update
	}
5481
	/// Builds the next counterparty commitment transaction without mutating any channel state,
	/// returning the included HTLCs (with their sources, where known) and the transaction itself.
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		// In test/fuzzing builds, cross-check the fee we predicted when the HTLC was accepted
		// against the fee of the commitment transaction we actually built.
		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					// Only compare if the channel state the projection was computed against
					// still matches (no HTLCs added/removed, same feerate since then).
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
						}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
5510
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	///
	/// Returns the `commitment_signed` message to send plus the counterparty commitment txid and
	/// the HTLCs included in it (with their sources, where known).
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					// Collect references to the included HTLCs so we can pair each with its
					// per-HTLC signature when logging below.
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					// A signer failure is mapped to `ChannelError::Ignore` rather than a
					// channel-closing error -- we don't want to force-close just because the
					// signer declined or was unavailable.
					let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			}
		}
	}
5561
5562         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5563         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5564         ///
5565         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5566         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5567         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5568                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5569                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5570                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5571         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5572         where F::Target: FeeEstimator, L::Target: Logger
5573         {
5574                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5575                         onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
5576                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5577                 match send_res? {
5578                         Some(_) => {
5579                                 let monitor_update = self.build_commitment_no_status_check(logger);
5580                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5581                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
5582                         },
5583                         None => Ok(None)
5584                 }
5585         }
5586
5587         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5588         /// happened.
5589         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5590                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5591                         fee_base_msat: msg.contents.fee_base_msat,
5592                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5593                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
5594                 });
5595                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5596                 if did_change {
5597                         self.context.counterparty_forwarding_info = new_forwarding_info;
5598                 }
5599
5600                 Ok(did_change)
5601         }
5602
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
	///
	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
	/// [`ChannelMonitorUpdate`] will be returned).
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		// We can't start shutdown while there are HTLCs we've announced to the peer but not yet
		// committed -- the user must process pending events first.
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		// Reject if shutdown has already been initiated by either side.
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
			if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
				return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
			}
			else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
			}
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
		// script is set, we just force-close and call it a day.
		let mut chan_closed = false;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			chan_closed = true;
		}

		// Decide whether we need to commit a (new) shutdown script; only needed when we don't
		// already have one and the channel will actually go through the cooperative close flow.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None if !chan_closed => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
			None => false,
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Not yet funded: jump straight to fully-shutdown, skipping the cooperative flow.
			self.context.channel_state = ChannelState::ShutdownComplete as u32;
		} else {
			self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		}
		self.context.update_time_counter += 1;

		// Persist the newly-committed shutdown script via a monitor update so it survives
		// restarts before the cooperative close completes.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
5707
5708         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5709                 self.context.holding_cell_htlc_updates.iter()
5710                         .flat_map(|htlc_update| {
5711                                 match htlc_update {
5712                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5713                                                 => Some((source, payment_hash)),
5714                                         _ => None,
5715                                 }
5716                         })
5717                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5718         }
5719 }
5720
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// Core channel state and configuration shared with funded channels.
	pub context: ChannelContext<SP>,
	// State that only applies while the channel remains unfunded.
	pub unfunded_context: UnfundedChannelContext,
}
5726
5727 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5728         pub fn new<ES: Deref, F: Deref>(
5729                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5730                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5731                 outbound_scid_alias: u64
5732         ) -> Result<OutboundV1Channel<SP>, APIError>
5733         where ES::Target: EntropySource,
5734               F::Target: FeeEstimator
5735         {
5736                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5737                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5738                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5739                 let pubkeys = holder_signer.pubkeys().clone();
5740
5741                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5742                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5743                 }
5744                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5745                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5746                 }
5747                 let channel_value_msat = channel_value_satoshis * 1000;
5748                 if push_msat > channel_value_msat {
5749                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5750                 }
5751                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5752                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5753                 }
5754                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5755                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5756                         // Protocol level safety check in place, although it should never happen because
5757                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5758                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5759                 }
5760
5761                 let channel_type = Self::get_initial_channel_type(&config, their_features);
5762                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5763
5764                 let (commitment_conf_target, anchor_outputs_value_msat)  = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5765                         (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5766                 } else {
5767                         (ConfirmationTarget::NonAnchorChannelFee, 0)
5768                 };
5769                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5770
5771                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5772                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5773                 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5774                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5775                 }
5776
5777                 let mut secp_ctx = Secp256k1::new();
5778                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5779
5780                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5781                         match signer_provider.get_shutdown_scriptpubkey() {
5782                                 Ok(scriptpubkey) => Some(scriptpubkey),
5783                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5784                         }
5785                 } else { None };
5786
5787                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5788                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
5789                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5790                         }
5791                 }
5792
5793                 let destination_script = match signer_provider.get_destination_script() {
5794                         Ok(script) => script,
5795                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5796                 };
5797
5798                 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
5799
5800                 Ok(Self {
5801                         context: ChannelContext {
5802                                 user_id,
5803
5804                                 config: LegacyChannelConfig {
5805                                         options: config.channel_config.clone(),
5806                                         announced_channel: config.channel_handshake_config.announced_channel,
5807                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5808                                 },
5809
5810                                 prev_config: None,
5811
5812                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5813
5814                                 channel_id: temporary_channel_id,
5815                                 temporary_channel_id: Some(temporary_channel_id),
5816                                 channel_state: ChannelState::OurInitSent as u32,
5817                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
5818                                 secp_ctx,
5819                                 channel_value_satoshis,
5820
5821                                 latest_monitor_update_id: 0,
5822
5823                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5824                                 shutdown_scriptpubkey,
5825                                 destination_script,
5826
5827                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5828                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5829                                 value_to_self_msat,
5830
5831                                 pending_inbound_htlcs: Vec::new(),
5832                                 pending_outbound_htlcs: Vec::new(),
5833                                 holding_cell_htlc_updates: Vec::new(),
5834                                 pending_update_fee: None,
5835                                 holding_cell_update_fee: None,
5836                                 next_holder_htlc_id: 0,
5837                                 next_counterparty_htlc_id: 0,
5838                                 update_time_counter: 1,
5839
5840                                 resend_order: RAACommitmentOrder::CommitmentFirst,
5841
5842                                 monitor_pending_channel_ready: false,
5843                                 monitor_pending_revoke_and_ack: false,
5844                                 monitor_pending_commitment_signed: false,
5845                                 monitor_pending_forwards: Vec::new(),
5846                                 monitor_pending_failures: Vec::new(),
5847                                 monitor_pending_finalized_fulfills: Vec::new(),
5848
5849                                 signer_pending_commitment_update: false,
5850
5851                                 #[cfg(debug_assertions)]
5852                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5853                                 #[cfg(debug_assertions)]
5854                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5855
5856                                 last_sent_closing_fee: None,
5857                                 pending_counterparty_closing_signed: None,
5858                                 closing_fee_limits: None,
5859                                 target_closing_feerate_sats_per_kw: None,
5860
5861                                 funding_tx_confirmed_in: None,
5862                                 funding_tx_confirmation_height: 0,
5863                                 short_channel_id: None,
5864                                 channel_creation_height: current_chain_height,
5865
5866                                 feerate_per_kw: commitment_feerate,
5867                                 counterparty_dust_limit_satoshis: 0,
5868                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
5869                                 counterparty_max_htlc_value_in_flight_msat: 0,
5870                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
5871                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
5872                                 holder_selected_channel_reserve_satoshis,
5873                                 counterparty_htlc_minimum_msat: 0,
5874                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
5875                                 counterparty_max_accepted_htlcs: 0,
5876                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
5877                                 minimum_depth: None, // Filled in in accept_channel
5878
5879                                 counterparty_forwarding_info: None,
5880
5881                                 channel_transaction_parameters: ChannelTransactionParameters {
5882                                         holder_pubkeys: pubkeys,
5883                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
5884                                         is_outbound_from_holder: true,
5885                                         counterparty_parameters: None,
5886                                         funding_outpoint: None,
5887                                         channel_type_features: channel_type.clone()
5888                                 },
5889                                 funding_transaction: None,
5890                                 is_batch_funding: None,
5891
5892                                 counterparty_cur_commitment_point: None,
5893                                 counterparty_prev_commitment_point: None,
5894                                 counterparty_node_id,
5895
5896                                 counterparty_shutdown_scriptpubkey: None,
5897
5898                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
5899
5900                                 channel_update_status: ChannelUpdateStatus::Enabled,
5901                                 closing_signed_in_flight: false,
5902
5903                                 announcement_sigs: None,
5904
5905                                 #[cfg(any(test, fuzzing))]
5906                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
5907                                 #[cfg(any(test, fuzzing))]
5908                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
5909
5910                                 workaround_lnd_bug_4006: None,
5911                                 sent_message_awaiting_response: None,
5912
5913                                 latest_inbound_scid_alias: None,
5914                                 outbound_scid_alias,
5915
5916                                 channel_pending_event_emitted: false,
5917                                 channel_ready_event_emitted: false,
5918
5919                                 #[cfg(any(test, fuzzing))]
5920                                 historical_inbound_htlc_fulfills: HashSet::new(),
5921
5922                                 channel_type,
5923                                 channel_keys_id,
5924
5925                                 blocked_monitor_updates: Vec::new(),
5926                         },
5927                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
5928                 })
5929         }
5930
5931         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
5932         fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
5933                 let counterparty_keys = self.context.build_remote_transaction_keys();
5934                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5935                 match &self.context.holder_signer {
5936                         // TODO (taproot|arik): move match into calling method for Taproot
5937                         ChannelSignerType::Ecdsa(ecdsa) => {
5938                                 Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
5939                                         .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5940                         }
5941                 }
5942         }
5943
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
	pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		// Commitment numbers count down from `INITIAL_COMMITMENT_NUMBER`; none of them (nor the
		// counterparty's revocation secrets) may have advanced before funding is created.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		// The signer needs the funding outpoint before it can sign the initial commitment tx.
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let signature = match self.get_funding_created_signature(logger) {
			Ok(res) => res,
			Err(e) => {
				log_error!(logger, "Got bad signatures: {:?}!", e);
				// Roll back the funding-outpoint assignment made above so the still-unfunded
				// channel is handed back to the caller in a consistent state along with the error.
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, e));
			}
		};

		// The channel has been keyed on a temporary id until now; the message must carry it so the
		// peer can correlate, while we re-key on the funding outpoint below.
		let temporary_channel_id = self.context.channel_id;

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingCreated as u32;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
			self.context.minimum_depth.unwrap_or(0) > 0 &&
			self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		// `Some(())` iff this channel's funding is part of a batch funding transaction.
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let channel = Channel {
			context: self.context,
		};

		Ok((channel, msgs::FundingCreated {
			temporary_channel_id,
			funding_txid: funding_txo.txid,
			funding_output_index: funding_txo.index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		}))
	}
6010
6011         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6012                 // The default channel type (ie the first one we try) depends on whether the channel is
6013                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6014                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6015                 // with no other changes, and fall back to `only_static_remotekey`.
6016                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6017                 if !config.channel_handshake_config.announced_channel &&
6018                         config.channel_handshake_config.negotiate_scid_privacy &&
6019                         their_features.supports_scid_privacy() {
6020                         ret.set_scid_privacy_required();
6021                 }
6022
6023                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6024                 // set it now. If they don't understand it, we'll fall back to our default of
6025                 // `only_static_remotekey`.
6026                 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6027                         their_features.supports_anchors_zero_fee_htlc_tx() {
6028                         ret.set_anchors_zero_fee_htlc_tx_required();
6029                 }
6030
6031                 ret
6032         }
6033
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
	///
	/// Returns the replacement `open_channel` message to send, or `Err(())` once we've run out of
	/// channel-type downgrades to try (or the channel is in the wrong state to retry).
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		// Only an outbound channel that hasn't progressed past sending `open_channel` can retry.
		if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted one.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// whatever reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			// Dropping anchors also means switching back to a feerate appropriate for
			// non-anchor channels.
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		// Keep the transaction-building parameters in sync with the downgraded channel type.
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
6069
6070         pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6071                 if !self.context.is_outbound() {
6072                         panic!("Tried to open a channel for an inbound channel?");
6073                 }
6074                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6075                         panic!("Cannot generate an open_channel after we've moved forward");
6076                 }
6077
6078                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6079                         panic!("Tried to send an open_channel for a channel that has already advanced");
6080                 }
6081
6082                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6083                 let keys = self.context.get_holder_pubkeys();
6084
6085                 msgs::OpenChannel {
6086                         chain_hash,
6087                         temporary_channel_id: self.context.channel_id,
6088                         funding_satoshis: self.context.channel_value_satoshis,
6089                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6090                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6091                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6092                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6093                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6094                         feerate_per_kw: self.context.feerate_per_kw as u32,
6095                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6096                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6097                         funding_pubkey: keys.funding_pubkey,
6098                         revocation_basepoint: keys.revocation_basepoint,
6099                         payment_point: keys.payment_point,
6100                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6101                         htlc_basepoint: keys.htlc_basepoint,
6102                         first_per_commitment_point,
6103                         channel_flags: if self.context.config.announced_channel {1} else {0},
6104                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6105                                 Some(script) => script.clone().into_inner(),
6106                                 None => Builder::new().into_script(),
6107                         }),
6108                         channel_type: Some(self.context.channel_type.clone()),
6109                 }
6110         }
6111
	// Message handlers
	/// Handles the counterparty's `accept_channel` reply to our `open_channel`, validating its
	/// fields against protocol limits and our configured handshake limits, then recording the
	/// counterparty's channel parameters.
	///
	/// On success the channel advances to the `OurInitSent | TheirInitSent` state, ready for
	/// `get_funding_created`. Any returned error is a `ChannelError::Close`.
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		// Prefer any per-channel limits override captured at open time over the global defaults.
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if self.context.channel_state != ChannelState::OurInitSent as u32 {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		// 21 million BTC in satoshis: a dust limit above this is nonsensical.
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		// An htlc_minimum at/above the spendable channel value would make the channel unusable.
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		// The counterparty must either echo the channel type we proposed, implicitly accept it
		// (by supporting channel-type negotiation at all), or be a legacy peer whose implied
		// type is `only_static_remotekey`.
		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		// All checks passed: record the counterparty's parameters.
		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		// If we trust our own 0-conf funding, honor their minimum_depth even if it's zero;
		// otherwise require at least one confirmation.
		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: msg.revocation_basepoint,
			payment_point: msg.payment_point,
			delayed_payment_basepoint: msg.delayed_payment_basepoint,
			htlc_basepoint: msg.htlc_basepoint
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
6242 }
6243
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// The shared channel state/configuration, moved into a full `Channel` once funding completes.
	pub context: ChannelContext<SP>,
	// Pre-funding-only state, e.g. how many timer ticks this channel has been awaiting funding.
	pub unfunded_context: UnfundedChannelContext,
}
6249
6250 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6251         /// Creates a new channel from a remote sides' request for one.
6252         /// Assumes chain_hash has already been checked and corresponds with what we expect!
6253         pub fn new<ES: Deref, F: Deref, L: Deref>(
6254                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6255                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6256                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6257                 current_chain_height: u32, logger: &L, is_0conf: bool,
6258         ) -> Result<InboundV1Channel<SP>, ChannelError>
6259                 where ES::Target: EntropySource,
6260                           F::Target: FeeEstimator,
6261                           L::Target: Logger,
6262         {
6263                 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6264
6265                 // First check the channel type is known, failing before we do anything else if we don't
6266                 // support this channel type.
6267                 let channel_type = if let Some(channel_type) = &msg.channel_type {
6268                         if channel_type.supports_any_optional_bits() {
6269                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6270                         }
6271
6272                         // We only support the channel types defined by the `ChannelManager` in
6273                         // `provided_channel_type_features`. The channel type must always support
6274                         // `static_remote_key`.
6275                         if !channel_type.requires_static_remote_key() {
6276                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6277                         }
6278                         // Make sure we support all of the features behind the channel type.
6279                         if !channel_type.is_subset(our_supported_features) {
6280                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6281                         }
6282                         if channel_type.requires_scid_privacy() && announced_channel {
6283                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6284                         }
6285                         channel_type.clone()
6286                 } else {
6287                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6288                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6289                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6290                         }
6291                         channel_type
6292                 };
6293
6294                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6295                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6296                 let pubkeys = holder_signer.pubkeys().clone();
6297                 let counterparty_pubkeys = ChannelPublicKeys {
6298                         funding_pubkey: msg.funding_pubkey,
6299                         revocation_basepoint: msg.revocation_basepoint,
6300                         payment_point: msg.payment_point,
6301                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
6302                         htlc_basepoint: msg.htlc_basepoint
6303                 };
6304
6305                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6306                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6307                 }
6308
6309                 // Check sanity of message fields:
6310                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6311                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6312                 }
6313                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6314                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6315                 }
6316                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6317                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6318                 }
6319                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6320                 if msg.push_msat > full_channel_value_msat {
6321                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6322                 }
6323                 if msg.dust_limit_satoshis > msg.funding_satoshis {
6324                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6325                 }
6326                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6327                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6328                 }
6329                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6330
6331                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6332                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6333                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6334                 }
6335                 if msg.max_accepted_htlcs < 1 {
6336                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6337                 }
6338                 if msg.max_accepted_htlcs > MAX_HTLCS {
6339                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6340                 }
6341
6342                 // Now check against optional parameters as set by config...
6343                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6344                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6345                 }
6346                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6347                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
6348                 }
6349                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6350                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6351                 }
6352                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6353                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6354                 }
6355                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6356                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6357                 }
6358                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6359                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6360                 }
6361                 if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
6362                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6363                 }
6364
6365                 // Convert things into internal flags and prep our state:
6366
6367                 if config.channel_handshake_limits.force_announced_channel_preference {
6368                         if config.channel_handshake_config.announced_channel != announced_channel {
6369                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6370                         }
6371                 }
6372
6373                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6374                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6375                         // Protocol level safety check in place, although it should never happen because
6376                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6377                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6378                 }
6379                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6380                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6381                 }
6382                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6383                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6384                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6385                 }
6386                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6387                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6388                 }
6389
6390                 // check if the funder's amount for the initial commitment tx is sufficient
6391                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6392                 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6393                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6394                 } else {
6395                         0
6396                 };
6397                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6398                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6399                 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6400                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6401                 }
6402
6403                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6404                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6405                 // want to push much to us), our counterparty should always have more than our reserve.
6406                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6407                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6408                 }
6409
6410                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6411                         match &msg.shutdown_scriptpubkey {
6412                                 &Some(ref script) => {
6413                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6414                                         if script.len() == 0 {
6415                                                 None
6416                                         } else {
6417                                                 if !script::is_bolt2_compliant(&script, their_features) {
6418                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6419                                                 }
6420                                                 Some(script.clone())
6421                                         }
6422                                 },
6423                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6424                                 &None => {
6425                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6426                                 }
6427                         }
6428                 } else { None };
6429
6430                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6431                         match signer_provider.get_shutdown_scriptpubkey() {
6432                                 Ok(scriptpubkey) => Some(scriptpubkey),
6433                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6434                         }
6435                 } else { None };
6436
6437                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6438                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6439                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6440                         }
6441                 }
6442
6443                 let destination_script = match signer_provider.get_destination_script() {
6444                         Ok(script) => script,
6445                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6446                 };
6447
6448                 let mut secp_ctx = Secp256k1::new();
6449                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6450
6451                 let minimum_depth = if is_0conf {
6452                         Some(0)
6453                 } else {
6454                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6455                 };
6456
6457                 let chan = Self {
6458                         context: ChannelContext {
6459                                 user_id,
6460
6461                                 config: LegacyChannelConfig {
6462                                         options: config.channel_config.clone(),
6463                                         announced_channel,
6464                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6465                                 },
6466
6467                                 prev_config: None,
6468
6469                                 inbound_handshake_limits_override: None,
6470
6471                                 temporary_channel_id: Some(msg.temporary_channel_id),
6472                                 channel_id: msg.temporary_channel_id,
6473                                 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6474                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6475                                 secp_ctx,
6476
6477                                 latest_monitor_update_id: 0,
6478
6479                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6480                                 shutdown_scriptpubkey,
6481                                 destination_script,
6482
6483                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6484                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6485                                 value_to_self_msat: msg.push_msat,
6486
6487                                 pending_inbound_htlcs: Vec::new(),
6488                                 pending_outbound_htlcs: Vec::new(),
6489                                 holding_cell_htlc_updates: Vec::new(),
6490                                 pending_update_fee: None,
6491                                 holding_cell_update_fee: None,
6492                                 next_holder_htlc_id: 0,
6493                                 next_counterparty_htlc_id: 0,
6494                                 update_time_counter: 1,
6495
6496                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6497
6498                                 monitor_pending_channel_ready: false,
6499                                 monitor_pending_revoke_and_ack: false,
6500                                 monitor_pending_commitment_signed: false,
6501                                 monitor_pending_forwards: Vec::new(),
6502                                 monitor_pending_failures: Vec::new(),
6503                                 monitor_pending_finalized_fulfills: Vec::new(),
6504
6505                                 signer_pending_commitment_update: false,
6506
6507                                 #[cfg(debug_assertions)]
6508                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6509                                 #[cfg(debug_assertions)]
6510                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6511
6512                                 last_sent_closing_fee: None,
6513                                 pending_counterparty_closing_signed: None,
6514                                 closing_fee_limits: None,
6515                                 target_closing_feerate_sats_per_kw: None,
6516
6517                                 funding_tx_confirmed_in: None,
6518                                 funding_tx_confirmation_height: 0,
6519                                 short_channel_id: None,
6520                                 channel_creation_height: current_chain_height,
6521
6522                                 feerate_per_kw: msg.feerate_per_kw,
6523                                 channel_value_satoshis: msg.funding_satoshis,
6524                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6525                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6526                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6527                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6528                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6529                                 holder_selected_channel_reserve_satoshis,
6530                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6531                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6532                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6533                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6534                                 minimum_depth,
6535
6536                                 counterparty_forwarding_info: None,
6537
6538                                 channel_transaction_parameters: ChannelTransactionParameters {
6539                                         holder_pubkeys: pubkeys,
6540                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6541                                         is_outbound_from_holder: false,
6542                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6543                                                 selected_contest_delay: msg.to_self_delay,
6544                                                 pubkeys: counterparty_pubkeys,
6545                                         }),
6546                                         funding_outpoint: None,
6547                                         channel_type_features: channel_type.clone()
6548                                 },
6549                                 funding_transaction: None,
6550                                 is_batch_funding: None,
6551
6552                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6553                                 counterparty_prev_commitment_point: None,
6554                                 counterparty_node_id,
6555
6556                                 counterparty_shutdown_scriptpubkey,
6557
6558                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6559
6560                                 channel_update_status: ChannelUpdateStatus::Enabled,
6561                                 closing_signed_in_flight: false,
6562
6563                                 announcement_sigs: None,
6564
6565                                 #[cfg(any(test, fuzzing))]
6566                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6567                                 #[cfg(any(test, fuzzing))]
6568                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6569
6570                                 workaround_lnd_bug_4006: None,
6571                                 sent_message_awaiting_response: None,
6572
6573                                 latest_inbound_scid_alias: None,
6574                                 outbound_scid_alias: 0,
6575
6576                                 channel_pending_event_emitted: false,
6577                                 channel_ready_event_emitted: false,
6578
6579                                 #[cfg(any(test, fuzzing))]
6580                                 historical_inbound_htlc_fulfills: HashSet::new(),
6581
6582                                 channel_type,
6583                                 channel_keys_id,
6584
6585                                 blocked_monitor_updates: Vec::new(),
6586                         },
6587                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6588                 };
6589
6590                 Ok(chan)
6591         }
6592
6593         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6594         /// should be sent back to the counterparty node.
6595         ///
6596         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6597         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6598                 if self.context.is_outbound() {
6599                         panic!("Tried to send accept_channel for an outbound channel?");
6600                 }
6601                 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6602                         panic!("Tried to send accept_channel after channel had moved forward");
6603                 }
6604                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6605                         panic!("Tried to send an accept_channel for a channel that has already advanced");
6606                 }
6607
6608                 self.generate_accept_channel_message()
6609         }
6610
6611         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6612         /// inbound channel. If the intention is to accept an inbound channel, use
6613         /// [`InboundV1Channel::accept_inbound_channel`] instead.
6614         ///
6615         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6616         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6617                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6618                 let keys = self.context.get_holder_pubkeys();
6619
6620                 msgs::AcceptChannel {
6621                         temporary_channel_id: self.context.channel_id,
6622                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6623                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6624                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6625                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6626                         minimum_depth: self.context.minimum_depth.unwrap(),
6627                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6628                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6629                         funding_pubkey: keys.funding_pubkey,
6630                         revocation_basepoint: keys.revocation_basepoint,
6631                         payment_point: keys.payment_point,
6632                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6633                         htlc_basepoint: keys.htlc_basepoint,
6634                         first_per_commitment_point,
6635                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6636                                 Some(script) => script.clone().into_inner(),
6637                                 None => Builder::new().into_script(),
6638                         }),
6639                         channel_type: Some(self.context.channel_type.clone()),
6640                         #[cfg(taproot)]
6641                         next_local_nonce: None,
6642                 }
6643         }
6644
	/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
	/// inbound channel without accepting it.
	///
	/// Unlike `accept_inbound_channel`, this performs none of the channel-state sanity checks
	/// and does not mark the channel as accepted.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	#[cfg(test)]
	pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
		self.generate_accept_channel_message()
	}
6653
	/// Verifies the counterparty's `funding_created` signature over our initial (holder)
	/// commitment transaction, then signs the counterparty's initial commitment transaction.
	///
	/// Returns `(counterparty_initial_commitment_tx, holder_initial_commitment_tx,
	/// our_signature_on_counterparty_tx)`. Errors with [`ChannelError::Close`] if the peer's
	/// signature does not verify against the funding redeemscript, or if our signer fails to
	/// produce a signature.
	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		// Build our own (holder) initial commitment transaction so we can check the peer's
		// signature against its sighash.
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign the holder commitment transaction...
			log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
				log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
				encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
				encode::serialize_hex(&funding_script), &self.context.channel_id());
			// An invalid signature from the peer is unrecoverable: fail the channel.
			secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
		}

		// Build the counterparty's initial commitment transaction, which we will sign below.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;

		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		match &self.context.holder_signer {
			// TODO (arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				// A signer failure here maps to a channel close; the caller (funding_created)
				// unwinds the funding_outpoint it set before calling us.
				let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;

				// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
				Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
			}
		}
	}
6690
6691         pub fn funding_created<L: Deref>(
6692                 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6693         ) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
6694         where
6695                 L::Target: Logger
6696         {
6697                 if self.context.is_outbound() {
6698                         return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6699                 }
6700                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6701                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6702                         // remember the channel, so it's safe to just send an error_message here and drop the
6703                         // channel.
6704                         return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6705                 }
6706                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6707                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6708                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6709                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6710                 }
6711
6712                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6713                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6714                 // This is an externally observable change before we finish all our checks.  In particular
6715                 // funding_created_signature may fail.
6716                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6717
6718                 let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
6719                         Ok(res) => res,
6720                         Err(ChannelError::Close(e)) => {
6721                                 self.context.channel_transaction_parameters.funding_outpoint = None;
6722                                 return Err((self, ChannelError::Close(e)));
6723                         },
6724                         Err(e) => {
6725                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
6726                                 // to make sure we don't continue with an inconsistent state.
6727                                 panic!("unexpected error type from funding_created_signature {:?}", e);
6728                         }
6729                 };
6730
6731                 let holder_commitment_tx = HolderCommitmentTransaction::new(
6732                         initial_commitment_tx,
6733                         msg.signature,
6734                         Vec::new(),
6735                         &self.context.get_holder_pubkeys().funding_pubkey,
6736                         self.context.counterparty_funding_pubkey()
6737                 );
6738
6739                 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6740                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6741                 }
6742
6743                 // Now that we're past error-generating stuff, update our local state:
6744
6745                 let funding_redeemscript = self.context.get_funding_redeemscript();
6746                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6747                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6748                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6749                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6750                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6751                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6752                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
6753                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6754                                                           &self.context.channel_transaction_parameters,
6755                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
6756                                                           obscure_factor,
6757                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
6758
6759                 channel_monitor.provide_initial_counterparty_commitment_tx(
6760                         counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6761                         self.context.cur_counterparty_commitment_transaction_number,
6762                         self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6763                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6764                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6765
6766                 self.context.channel_state = ChannelState::FundingSent as u32;
6767                 self.context.channel_id = funding_txo.to_channel_id();
6768                 self.context.cur_counterparty_commitment_transaction_number -= 1;
6769                 self.context.cur_holder_commitment_transaction_number -= 1;
6770
6771                 log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
6772
6773                 // Promote the channel to a full-fledged one now that we have updated the state and have a
6774                 // `ChannelMonitor`.
6775                 let mut channel = Channel {
6776                         context: self.context,
6777                 };
6778                 let channel_id = channel.context.channel_id.clone();
6779                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6780                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6781
6782                 Ok((channel, msgs::FundingSigned {
6783                         channel_id,
6784                         signature,
6785                         #[cfg(taproot)]
6786                         partial_signature_with_nonce: None,
6787                 }, channel_monitor))
6788         }
6789 }
6790
// On-disk serialization versioning for `Channel`. The writer below currently emits
// `MIN_SERIALIZATION_VERSION` via `write_ver_prefix!`, while the reader accepts anything up to
// `SERIALIZATION_VERSION` via `read_ver_prefix!` (with `ver == 1` triggering the legacy
// pre-TLV config parsing path).
const SERIALIZATION_VERSION: u8 = 3;
// Oldest serialization version whose layout we still know how to read.
const MIN_SERIALIZATION_VERSION: u8 = 2;
6793
// (De)serialization for `InboundHTLCRemovalReason`: each variant is written with the numeric
// tag listed here, so these tags are part of the on-disk format and must never be renumbered.
impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);
6799
6800 impl Writeable for ChannelUpdateStatus {
6801         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6802                 // We only care about writing out the current state as it was announced, ie only either
6803                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6804                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6805                 match self {
6806                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6807                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6808                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6809                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6810                 }
6811                 Ok(())
6812         }
6813 }
6814
6815 impl Readable for ChannelUpdateStatus {
6816         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6817                 Ok(match <u8 as Readable>::read(reader)? {
6818                         0 => ChannelUpdateStatus::Enabled,
6819                         1 => ChannelUpdateStatus::Disabled,
6820                         _ => return Err(DecodeError::InvalidValue),
6821                 })
6822         }
6823 }
6824
6825 impl Writeable for AnnouncementSigsState {
6826         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6827                 // We only care about writing out the current state as if we had just disconnected, at
6828                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6829                 match self {
6830                         AnnouncementSigsState::NotSent => 0u8.write(writer),
6831                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
6832                         AnnouncementSigsState::Committed => 0u8.write(writer),
6833                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6834                 }
6835         }
6836 }
6837
6838 impl Readable for AnnouncementSigsState {
6839         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6840                 Ok(match <u8 as Readable>::read(reader)? {
6841                         0 => AnnouncementSigsState::NotSent,
6842                         1 => AnnouncementSigsState::PeerReceived,
6843                         _ => return Err(DecodeError::InvalidValue),
6844                 })
6845         }
6846 }
6847
impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
	// Serializes the full channel state in the fixed legacy field order followed by a TLV
	// stream. The field order below IS the on-disk format — do not reorder any write.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
		// called.

		// NOTE(review): both arguments here are MIN_SERIALIZATION_VERSION — presumably
		// (version-written, minimum-version-to-read); confirm against `write_ver_prefix!`.
		write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
		// the low bytes now and the optional high bytes later.
		let user_id_low = self.context.user_id as u64;
		user_id_low.write(writer)?;

		// Version 1 deserializers expected to read parts of the config object here. Version 2
		// deserializers (0.0.99) now read config through TLVs, and as we now require them for
		// `minimum_depth` we simply write dummy values here.
		writer.write_all(&[0; 8])?;

		self.context.channel_id.write(writer)?;
		// Force-set the PeerDisconnected flag in the serialized state: after a reload we are
		// necessarily disconnected from our peer.
		(self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
		self.context.channel_value_satoshis.write(writer)?;

		self.context.latest_monitor_update_id.write(writer)?;

		// Serialize the holder signer as a length-prefixed opaque blob (legacy format; the
		// read side chooses whether to use it based on whether the `channel_keys_id` TLV is
		// present).
		let mut key_data = VecWriter(Vec::new());
		// TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
		self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
		// The length is written as a u32, so it must fit; these should be unreachable in
		// practice for any sane signer serialization.
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
		// deserialized from that format.
		match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
			Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
			// All-zero placeholder when there is no legacy-format pubkey to write.
			None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
		}
		self.context.destination_script.write(writer)?;

		self.context.cur_holder_commitment_transaction_number.write(writer)?;
		self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
		self.context.value_to_self_msat.write(writer)?;

		// Inbound HTLCs the peer announced but which were never committed (RemoteAnnounced)
		// are dropped from the serialization entirely; count them first so the length prefix
		// below matches the entries actually written.
		let mut dropped_inbound_htlcs = 0;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				dropped_inbound_htlcs += 1;
			}
		}
		(self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
				continue; // Drop
			}
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			// Each state is tagged with a stable one-byte discriminant (1-4; 0 is never
			// written as RemoteAnnounced entries were skipped above).
			match &htlc.state {
				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
					1u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
					2u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::Committed => {
					3u8.write(writer)?;
				},
				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
					4u8.write(writer)?;
					removal_reason.write(writer)?;
				},
			}
		}

		// `preimages` collects the payment preimages of successfully-resolved outbound HTLCs
		// for the TLV stream (type 15); `pending_outbound_skimmed_fees` is a parallel sparse
		// vector for TLV type 35, built lazily below.
		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
		let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();

		(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
		for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			htlc.source.write(writer)?;
			match &htlc.state {
				&OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
					0u8.write(writer)?;
					onion_packet.write(writer)?;
				},
				&OutboundHTLCState::Committed => {
					1u8.write(writer)?;
				},
				&OutboundHTLCState::RemoteRemoved(_) => {
					// Treat this as a Committed because we haven't received the CS - they'll
					// resend the claim/fail on reconnect as we all (hopefully) the missing CS.
					1u8.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
					3u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
				&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
					4u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
			}
			// Lazily materialize the skimmed-fee vector: it stays empty (and is omitted as a
			// TLV) unless some HTLC actually has a skimmed fee, in which case it is padded
			// with `None` so its indices line up with `pending_outbound_htlcs`.
			if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
				if pending_outbound_skimmed_fees.is_empty() {
					for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
				}
				pending_outbound_skimmed_fees.push(Some(skimmed_fee));
			} else if !pending_outbound_skimmed_fees.is_empty() {
				pending_outbound_skimmed_fees.push(None);
			}
		}

		// Same lazy sparse-vector scheme as above, for holding-cell HTLC adds (TLV type 37).
		let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
		for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
			match update {
				&HTLCUpdateAwaitingACK::AddHTLC {
					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
					skimmed_fee_msat,
				} => {
					0u8.write(writer)?;
					amount_msat.write(writer)?;
					cltv_expiry.write(writer)?;
					payment_hash.write(writer)?;
					source.write(writer)?;
					onion_routing_packet.write(writer)?;

					if let Some(skimmed_fee) = skimmed_fee_msat {
						if holding_cell_skimmed_fees.is_empty() {
							for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
						}
						holding_cell_skimmed_fees.push(Some(skimmed_fee));
					} else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
					1u8.write(writer)?;
					payment_preimage.write(writer)?;
					htlc_id.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					err_packet.write(writer)?;
				}
			}
		}

		// One byte: 0 = commitment first, 1 = revoke-and-ack first on reconnect.
		match self.context.resend_order {
			RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
			RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
		}

		self.context.monitor_pending_channel_ready.write(writer)?;
		self.context.monitor_pending_revoke_and_ack.write(writer)?;
		self.context.monitor_pending_commitment_signed.write(writer)?;

		(self.context.monitor_pending_forwards.len() as u64).write(writer)?;
		for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
			pending_forward.write(writer)?;
			htlc_id.write(writer)?;
		}

		(self.context.monitor_pending_failures.len() as u64).write(writer)?;
		for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
			htlc_source.write(writer)?;
			payment_hash.write(writer)?;
			fail_reason.write(writer)?;
		}

		if self.context.is_outbound() {
			self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
		} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
			Some(feerate).write(writer)?;
		} else {
			// As for inbound HTLCs, if the update was only announced and never committed in a
			// commitment_signed, drop it.
			None::<u32>.write(writer)?;
		}
		self.context.holding_cell_update_fee.write(writer)?;

		self.context.next_holder_htlc_id.write(writer)?;
		// Account for the uncommitted inbound HTLCs dropped above so the counter matches the
		// serialized HTLC set.
		(self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
		self.context.update_time_counter.write(writer)?;
		self.context.feerate_per_kw.write(writer)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		0u8.write(writer)?;

		self.context.funding_tx_confirmed_in.write(writer)?;
		self.context.funding_tx_confirmation_height.write(writer)?;
		self.context.short_channel_id.write(writer)?;

		self.context.counterparty_dust_limit_satoshis.write(writer)?;
		self.context.holder_dust_limit_satoshis.write(writer)?;
		self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

		self.context.counterparty_htlc_minimum_msat.write(writer)?;
		self.context.holder_htlc_minimum_msat.write(writer)?;
		self.context.counterparty_max_accepted_htlcs.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.minimum_depth.unwrap_or(0).write(writer)?;

		// Option-like encoding: 1 followed by the three fields when present, 0 when absent.
		match &self.context.counterparty_forwarding_info {
			Some(info) => {
				1u8.write(writer)?;
				info.fee_base_msat.write(writer)?;
				info.fee_proportional_millionths.write(writer)?;
				info.cltv_expiry_delta.write(writer)?;
			},
			None => 0u8.write(writer)?
		}

		self.context.channel_transaction_parameters.write(writer)?;
		self.context.funding_transaction.write(writer)?;

		self.context.counterparty_cur_commitment_point.write(writer)?;
		self.context.counterparty_prev_commitment_point.write(writer)?;
		self.context.counterparty_node_id.write(writer)?;

		self.context.counterparty_shutdown_scriptpubkey.write(writer)?;

		self.context.commitment_secrets.write(writer)?;

		self.context.channel_update_status.write(writer)?;

		// Test/fuzz-only bookkeeping of historically-fulfilled inbound HTLCs.
		#[cfg(any(test, fuzzing))]
		(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
		#[cfg(any(test, fuzzing))]
		for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
			htlc.write(writer)?;
		}

		// If the channel type is something other than only-static-remote-key, then we need to have
		// older clients fail to deserialize this channel at all. If the type is
		// only-static-remote-key, we simply consider it "default" and don't write the channel type
		// out at all.
		let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
			Some(&self.context.channel_type) } else { None };

		// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value then 10%, which older versions of LDK used
		// to set it to before the percentage was made configurable.
		let serialized_holder_selected_reserve =
			if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
			{ Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };

		let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
		old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
		let serialized_holder_htlc_max_in_flight =
			if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
			{ Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };

		let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
		let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
		// we write the high bytes as an option here.
		let user_id_high_opt = Some((self.context.user_id >> 64) as u64);

		// Only serialize the max-accepted-HTLCs override when it differs from the default.
		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };

		write_tlv_fields!(writer, {
			(0, self.context.announcement_sigs, option),
			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
			// default value instead of being Option<>al. Thus, to maintain compatibility we write
			// them twice, once with their original default values above, and once as an option
			// here. On the read side, old versions will simply ignore the odd-type entries here,
			// and new versions map the default values to None and allow the TLV entries here to
			// override that.
			(1, self.context.minimum_depth, option),
			(2, chan_type, option),
			(3, self.context.counterparty_selected_channel_reserve_satoshis, option),
			(4, serialized_holder_selected_reserve, option),
			(5, self.context.config, required),
			(6, serialized_holder_htlc_max_in_flight, option),
			(7, self.context.shutdown_scriptpubkey, option),
			(8, self.context.blocked_monitor_updates, optional_vec),
			(9, self.context.target_closing_feerate_sats_per_kw, option),
			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
			(13, self.context.channel_creation_height, required),
			(15, preimages, required_vec),
			(17, self.context.announcement_sigs_state, required),
			(19, self.context.latest_inbound_scid_alias, option),
			(21, self.context.outbound_scid_alias, required),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, self.context.channel_keys_id, required),
			(28, holder_max_accepted_htlcs, option),
			(29, self.context.temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees, optional_vec),
			(37, holding_cell_skimmed_fees, optional_vec),
			(38, self.context.is_batch_funding, option),
		});

		Ok(())
	}
}
7172
7173 const MAX_ALLOC_SIZE: usize = 64*1024;
7174 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7175                 where
7176                         ES::Target: EntropySource,
7177                         SP::Target: SignerProvider
7178 {
7179         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7180                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7181                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7182
7183                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7184                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7185                 // the low bytes now and the high bytes later.
7186                 let user_id_low: u64 = Readable::read(reader)?;
7187
7188                 let mut config = Some(LegacyChannelConfig::default());
7189                 if ver == 1 {
7190                         // Read the old serialization of the ChannelConfig from version 0.0.98.
7191                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7192                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7193                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7194                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7195                 } else {
7196                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7197                         let mut _val: u64 = Readable::read(reader)?;
7198                 }
7199
7200                 let channel_id = Readable::read(reader)?;
7201                 let channel_state = Readable::read(reader)?;
7202                 let channel_value_satoshis = Readable::read(reader)?;
7203
7204                 let latest_monitor_update_id = Readable::read(reader)?;
7205
7206                 let mut keys_data = None;
7207                 if ver <= 2 {
7208                         // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7209                         // the `channel_keys_id` TLV is present below.
7210                         let keys_len: u32 = Readable::read(reader)?;
7211                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7212                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
7213                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7214                                 let mut data = [0; 1024];
7215                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7216                                 reader.read_exact(read_slice)?;
7217                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7218                         }
7219                 }
7220
7221                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7222                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7223                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7224                         Err(_) => None,
7225                 };
7226                 let destination_script = Readable::read(reader)?;
7227
7228                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7229                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7230                 let value_to_self_msat = Readable::read(reader)?;
7231
7232                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7233
7234                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7235                 for _ in 0..pending_inbound_htlc_count {
7236                         pending_inbound_htlcs.push(InboundHTLCOutput {
7237                                 htlc_id: Readable::read(reader)?,
7238                                 amount_msat: Readable::read(reader)?,
7239                                 cltv_expiry: Readable::read(reader)?,
7240                                 payment_hash: Readable::read(reader)?,
7241                                 state: match <u8 as Readable>::read(reader)? {
7242                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7243                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7244                                         3 => InboundHTLCState::Committed,
7245                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7246                                         _ => return Err(DecodeError::InvalidValue),
7247                                 },
7248                         });
7249                 }
7250
7251                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7252                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7253                 for _ in 0..pending_outbound_htlc_count {
7254                         pending_outbound_htlcs.push(OutboundHTLCOutput {
7255                                 htlc_id: Readable::read(reader)?,
7256                                 amount_msat: Readable::read(reader)?,
7257                                 cltv_expiry: Readable::read(reader)?,
7258                                 payment_hash: Readable::read(reader)?,
7259                                 source: Readable::read(reader)?,
7260                                 state: match <u8 as Readable>::read(reader)? {
7261                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7262                                         1 => OutboundHTLCState::Committed,
7263                                         2 => {
7264                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7265                                                 OutboundHTLCState::RemoteRemoved(option.into())
7266                                         },
7267                                         3 => {
7268                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7269                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7270                                         },
7271                                         4 => {
7272                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7273                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7274                                         },
7275                                         _ => return Err(DecodeError::InvalidValue),
7276                                 },
7277                                 skimmed_fee_msat: None,
7278                         });
7279                 }
7280
7281                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7282                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7283                 for _ in 0..holding_cell_htlc_update_count {
7284                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7285                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
7286                                         amount_msat: Readable::read(reader)?,
7287                                         cltv_expiry: Readable::read(reader)?,
7288                                         payment_hash: Readable::read(reader)?,
7289                                         source: Readable::read(reader)?,
7290                                         onion_routing_packet: Readable::read(reader)?,
7291                                         skimmed_fee_msat: None,
7292                                 },
7293                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7294                                         payment_preimage: Readable::read(reader)?,
7295                                         htlc_id: Readable::read(reader)?,
7296                                 },
7297                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
7298                                         htlc_id: Readable::read(reader)?,
7299                                         err_packet: Readable::read(reader)?,
7300                                 },
7301                                 _ => return Err(DecodeError::InvalidValue),
7302                         });
7303                 }
7304
7305                 let resend_order = match <u8 as Readable>::read(reader)? {
7306                         0 => RAACommitmentOrder::CommitmentFirst,
7307                         1 => RAACommitmentOrder::RevokeAndACKFirst,
7308                         _ => return Err(DecodeError::InvalidValue),
7309                 };
7310
7311                 let monitor_pending_channel_ready = Readable::read(reader)?;
7312                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7313                 let monitor_pending_commitment_signed = Readable::read(reader)?;
7314
7315                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7316                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7317                 for _ in 0..monitor_pending_forwards_count {
7318                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7319                 }
7320
7321                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7322                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7323                 for _ in 0..monitor_pending_failures_count {
7324                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7325                 }
7326
7327                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7328
7329                 let holding_cell_update_fee = Readable::read(reader)?;
7330
7331                 let next_holder_htlc_id = Readable::read(reader)?;
7332                 let next_counterparty_htlc_id = Readable::read(reader)?;
7333                 let update_time_counter = Readable::read(reader)?;
7334                 let feerate_per_kw = Readable::read(reader)?;
7335
7336                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7337                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7338                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7339                 // consider the stale state on reload.
7340                 match <u8 as Readable>::read(reader)? {
7341                         0 => {},
7342                         1 => {
7343                                 let _: u32 = Readable::read(reader)?;
7344                                 let _: u64 = Readable::read(reader)?;
7345                                 let _: Signature = Readable::read(reader)?;
7346                         },
7347                         _ => return Err(DecodeError::InvalidValue),
7348                 }
7349
7350                 let funding_tx_confirmed_in = Readable::read(reader)?;
7351                 let funding_tx_confirmation_height = Readable::read(reader)?;
7352                 let short_channel_id = Readable::read(reader)?;
7353
7354                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7355                 let holder_dust_limit_satoshis = Readable::read(reader)?;
7356                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7357                 let mut counterparty_selected_channel_reserve_satoshis = None;
7358                 if ver == 1 {
7359                         // Read the old serialization from version 0.0.98.
7360                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7361                 } else {
7362                         // Read the 8 bytes of backwards-compatibility data.
7363                         let _dummy: u64 = Readable::read(reader)?;
7364                 }
7365                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7366                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7367                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7368
7369                 let mut minimum_depth = None;
7370                 if ver == 1 {
7371                         // Read the old serialization from version 0.0.98.
7372                         minimum_depth = Some(Readable::read(reader)?);
7373                 } else {
7374                         // Read the 4 bytes of backwards-compatibility data.
7375                         let _dummy: u32 = Readable::read(reader)?;
7376                 }
7377
7378                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7379                         0 => None,
7380                         1 => Some(CounterpartyForwardingInfo {
7381                                 fee_base_msat: Readable::read(reader)?,
7382                                 fee_proportional_millionths: Readable::read(reader)?,
7383                                 cltv_expiry_delta: Readable::read(reader)?,
7384                         }),
7385                         _ => return Err(DecodeError::InvalidValue),
7386                 };
7387
7388                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7389                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7390
7391                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7392
7393                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7394                 let counterparty_node_id = Readable::read(reader)?;
7395
7396                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7397                 let commitment_secrets = Readable::read(reader)?;
7398
7399                 let channel_update_status = Readable::read(reader)?;
7400
7401                 #[cfg(any(test, fuzzing))]
7402                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7403                 #[cfg(any(test, fuzzing))]
7404                 {
7405                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7406                         for _ in 0..htlc_fulfills_len {
7407                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7408                         }
7409                 }
7410
7411                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7412                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7413                                 FeeUpdateState::Outbound
7414                         } else {
7415                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7416                         }))
7417                 } else {
7418                         None
7419                 };
7420
7421                 let mut announcement_sigs = None;
7422                 let mut target_closing_feerate_sats_per_kw = None;
7423                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7424                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7425                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7426                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7427                 // only, so we default to that if none was written.
7428                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7429                 let mut channel_creation_height = Some(serialized_height);
7430                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7431
7432                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7433                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7434                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7435                 let mut latest_inbound_scid_alias = None;
7436                 let mut outbound_scid_alias = None;
7437                 let mut channel_pending_event_emitted = None;
7438                 let mut channel_ready_event_emitted = None;
7439
7440                 let mut user_id_high_opt: Option<u64> = None;
7441                 let mut channel_keys_id: Option<[u8; 32]> = None;
7442                 let mut temporary_channel_id: Option<ChannelId> = None;
7443                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7444
7445                 let mut blocked_monitor_updates = Some(Vec::new());
7446
7447                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7448                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7449
7450                 let mut is_batch_funding: Option<()> = None;
7451
7452                 read_tlv_fields!(reader, {
7453                         (0, announcement_sigs, option),
7454                         (1, minimum_depth, option),
7455                         (2, channel_type, option),
7456                         (3, counterparty_selected_channel_reserve_satoshis, option),
7457                         (4, holder_selected_channel_reserve_satoshis, option),
7458                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7459                         (6, holder_max_htlc_value_in_flight_msat, option),
7460                         (7, shutdown_scriptpubkey, option),
7461                         (8, blocked_monitor_updates, optional_vec),
7462                         (9, target_closing_feerate_sats_per_kw, option),
7463                         (11, monitor_pending_finalized_fulfills, optional_vec),
7464                         (13, channel_creation_height, option),
7465                         (15, preimages_opt, optional_vec),
7466                         (17, announcement_sigs_state, option),
7467                         (19, latest_inbound_scid_alias, option),
7468                         (21, outbound_scid_alias, option),
7469                         (23, channel_ready_event_emitted, option),
7470                         (25, user_id_high_opt, option),
7471                         (27, channel_keys_id, option),
7472                         (28, holder_max_accepted_htlcs, option),
7473                         (29, temporary_channel_id, option),
7474                         (31, channel_pending_event_emitted, option),
7475                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
7476                         (37, holding_cell_skimmed_fees_opt, optional_vec),
7477                         (38, is_batch_funding, option),
7478                 });
7479
7480                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7481                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7482                         // If we've gotten to the funding stage of the channel, populate the signer with its
7483                         // required channel parameters.
7484                         let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7485                         if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7486                                 holder_signer.provide_channel_parameters(&channel_parameters);
7487                         }
7488                         (channel_keys_id, holder_signer)
7489                 } else {
7490                         // `keys_data` can be `None` if we had corrupted data.
7491                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7492                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7493                         (holder_signer.channel_keys_id(), holder_signer)
7494                 };
7495
7496                 if let Some(preimages) = preimages_opt {
7497                         let mut iter = preimages.into_iter();
7498                         for htlc in pending_outbound_htlcs.iter_mut() {
7499                                 match &htlc.state {
7500                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7501                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7502                                         }
7503                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7504                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7505                                         }
7506                                         _ => {}
7507                                 }
7508                         }
7509                         // We expect all preimages to be consumed above
7510                         if iter.next().is_some() {
7511                                 return Err(DecodeError::InvalidValue);
7512                         }
7513                 }
7514
7515                 let chan_features = channel_type.as_ref().unwrap();
7516                 if !chan_features.is_subset(our_supported_features) {
7517                         // If the channel was written by a new version and negotiated with features we don't
7518                         // understand yet, refuse to read it.
7519                         return Err(DecodeError::UnknownRequiredFeature);
7520                 }
7521
7522                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7523                 // To account for that, we're proactively setting/overriding the field here.
7524                 channel_parameters.channel_type_features = chan_features.clone();
7525
7526                 let mut secp_ctx = Secp256k1::new();
7527                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7528
7529                 // `user_id` used to be a single u64 value. In order to remain backwards
7530                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7531                 // separate u64 values.
7532                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7533
7534                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7535
7536                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7537                         let mut iter = skimmed_fees.into_iter();
7538                         for htlc in pending_outbound_htlcs.iter_mut() {
7539                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7540                         }
7541                         // We expect all skimmed fees to be consumed above
7542                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7543                 }
7544                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7545                         let mut iter = skimmed_fees.into_iter();
7546                         for htlc in holding_cell_htlc_updates.iter_mut() {
7547                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7548                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7549                                 }
7550                         }
7551                         // We expect all skimmed fees to be consumed above
7552                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7553                 }
7554
7555                 Ok(Channel {
7556                         context: ChannelContext {
7557                                 user_id,
7558
7559                                 config: config.unwrap(),
7560
7561                                 prev_config: None,
7562
7563                                 // Note that we don't care about serializing handshake limits as we only ever serialize
7564                                 // channel data after the handshake has completed.
7565                                 inbound_handshake_limits_override: None,
7566
7567                                 channel_id,
7568                                 temporary_channel_id,
7569                                 channel_state,
7570                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
7571                                 secp_ctx,
7572                                 channel_value_satoshis,
7573
7574                                 latest_monitor_update_id,
7575
7576                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7577                                 shutdown_scriptpubkey,
7578                                 destination_script,
7579
7580                                 cur_holder_commitment_transaction_number,
7581                                 cur_counterparty_commitment_transaction_number,
7582                                 value_to_self_msat,
7583
7584                                 holder_max_accepted_htlcs,
7585                                 pending_inbound_htlcs,
7586                                 pending_outbound_htlcs,
7587                                 holding_cell_htlc_updates,
7588
7589                                 resend_order,
7590
7591                                 monitor_pending_channel_ready,
7592                                 monitor_pending_revoke_and_ack,
7593                                 monitor_pending_commitment_signed,
7594                                 monitor_pending_forwards,
7595                                 monitor_pending_failures,
7596                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7597
7598                                 signer_pending_commitment_update: false,
7599
7600                                 pending_update_fee,
7601                                 holding_cell_update_fee,
7602                                 next_holder_htlc_id,
7603                                 next_counterparty_htlc_id,
7604                                 update_time_counter,
7605                                 feerate_per_kw,
7606
7607                                 #[cfg(debug_assertions)]
7608                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7609                                 #[cfg(debug_assertions)]
7610                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7611
7612                                 last_sent_closing_fee: None,
7613                                 pending_counterparty_closing_signed: None,
7614                                 closing_fee_limits: None,
7615                                 target_closing_feerate_sats_per_kw,
7616
7617                                 funding_tx_confirmed_in,
7618                                 funding_tx_confirmation_height,
7619                                 short_channel_id,
7620                                 channel_creation_height: channel_creation_height.unwrap(),
7621
7622                                 counterparty_dust_limit_satoshis,
7623                                 holder_dust_limit_satoshis,
7624                                 counterparty_max_htlc_value_in_flight_msat,
7625                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7626                                 counterparty_selected_channel_reserve_satoshis,
7627                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7628                                 counterparty_htlc_minimum_msat,
7629                                 holder_htlc_minimum_msat,
7630                                 counterparty_max_accepted_htlcs,
7631                                 minimum_depth,
7632
7633                                 counterparty_forwarding_info,
7634
7635                                 channel_transaction_parameters: channel_parameters,
7636                                 funding_transaction,
7637                                 is_batch_funding,
7638
7639                                 counterparty_cur_commitment_point,
7640                                 counterparty_prev_commitment_point,
7641                                 counterparty_node_id,
7642
7643                                 counterparty_shutdown_scriptpubkey,
7644
7645                                 commitment_secrets,
7646
7647                                 channel_update_status,
7648                                 closing_signed_in_flight: false,
7649
7650                                 announcement_sigs,
7651
7652                                 #[cfg(any(test, fuzzing))]
7653                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7654                                 #[cfg(any(test, fuzzing))]
7655                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7656
7657                                 workaround_lnd_bug_4006: None,
7658                                 sent_message_awaiting_response: None,
7659
7660                                 latest_inbound_scid_alias,
7661                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7662                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7663
7664                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7665                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7666
7667                                 #[cfg(any(test, fuzzing))]
7668                                 historical_inbound_htlc_fulfills,
7669
7670                                 channel_type: channel_type.unwrap(),
7671                                 channel_keys_id,
7672
7673                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7674                         }
7675                 })
7676         }
7677 }
7678
7679 #[cfg(test)]
7680 mod tests {
7681         use std::cmp;
7682         use bitcoin::blockdata::constants::ChainHash;
7683         use bitcoin::blockdata::script::{Script, Builder};
7684         use bitcoin::blockdata::transaction::{Transaction, TxOut};
7685         use bitcoin::blockdata::opcodes;
7686         use bitcoin::network::constants::Network;
7687         use hex;
7688         use crate::ln::PaymentHash;
7689         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7690         use crate::ln::channel::InitFeatures;
7691         use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7692         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7693         use crate::ln::features::ChannelTypeFeatures;
7694         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7695         use crate::ln::script::ShutdownScript;
7696         use crate::ln::chan_utils;
7697         use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7698         use crate::chain::BestBlock;
7699         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7700         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7701         use crate::chain::transaction::OutPoint;
7702         use crate::routing::router::Path;
7703         use crate::util::config::UserConfig;
7704         use crate::util::errors::APIError;
7705         use crate::util::test_utils;
7706         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7707         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7708         use bitcoin::secp256k1::ffi::Signature as FFISignature;
7709         use bitcoin::secp256k1::{SecretKey,PublicKey};
7710         use bitcoin::hashes::sha256::Hash as Sha256;
7711         use bitcoin::hashes::Hash;
7712         use bitcoin::hash_types::WPubkeyHash;
7713         use bitcoin::PackedLockTime;
7714         use bitcoin::util::address::WitnessVersion;
7715         use crate::prelude::*;
7716
	/// A [`FeeEstimator`] for tests that always returns a single, fixed feerate,
	/// letting a test pin the fee environment a channel observes.
	struct TestFeeEstimator {
		// Feerate, in sat-per-1000-weight, returned for every confirmation target.
		fee_est: u32
	}
	impl FeeEstimator for TestFeeEstimator {
		// Ignores the confirmation target entirely and hands back the configured feerate.
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
			self.fee_est
		}
	}
7725
7726         #[test]
7727         fn test_max_funding_satoshis_no_wumbo() {
7728                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7729                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7730                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7731         }
7732
7733         #[test]
7734         fn test_no_fee_check_overflow() {
7735                 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7736                 // arithmetic, causing a panic with debug assertions enabled.
7737                 let fee_est = TestFeeEstimator { fee_est: 42 };
7738                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7739                 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
7740                         &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
7741                         u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
7742         }
7743
	/// Test key provider that hands out a single pre-built [`InMemorySigner`]
	/// for every channel, making key derivation fully deterministic.
	struct Keys {
		// The one signer returned by `derive_channel_signer` for all channels.
		signer: InMemorySigner,
	}
7747
	impl EntropySource for Keys {
		// Deterministic "entropy" (all zeroes) so test runs are reproducible.
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	}
7751
7752         impl SignerProvider for Keys {
7753                 type Signer = InMemorySigner;
7754
7755                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7756                         self.signer.channel_keys_id()
7757                 }
7758
7759                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7760                         self.signer.clone()
7761                 }
7762
7763                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
7764
7765                 fn get_destination_script(&self) -> Result<Script, ()> {
7766                         let secp_ctx = Secp256k1::signing_only();
7767                         let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7768                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7769                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
7770                 }
7771
7772                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7773                         let secp_ctx = Secp256k1::signing_only();
7774                         let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7775                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7776                 }
7777         }
7778
7779         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7780         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7781                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
7782         }
7783
7784         #[test]
7785         fn upfront_shutdown_script_incompatibility() {
7786                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7787                 let non_v0_segwit_shutdown_script =
7788                         ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7789
7790                 let seed = [42; 32];
7791                 let network = Network::Testnet;
7792                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7793                 keys_provider.expect(OnGetShutdownScriptpubkey {
7794                         returns: non_v0_segwit_shutdown_script.clone(),
7795                 });
7796
7797                 let secp_ctx = Secp256k1::new();
7798                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7799                 let config = UserConfig::default();
7800                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7801                         Err(APIError::IncompatibleShutdownScript { script }) => {
7802                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7803                         },
7804                         Err(e) => panic!("Unexpected error: {:?}", e),
7805                         Ok(_) => panic!("Expected error"),
7806                 }
7807         }
7808
7809         // Check that, during channel creation, we use the same feerate in the open channel message
7810         // as we do in the Channel object creation itself.
7811         #[test]
7812         fn test_open_channel_msg_fee() {
7813                 let original_fee = 253;
7814                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7815                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7816                 let secp_ctx = Secp256k1::new();
7817                 let seed = [42; 32];
7818                 let network = Network::Testnet;
7819                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7820
7821                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7822                 let config = UserConfig::default();
7823                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7824
7825                 // Now change the fee so we can check that the fee in the open_channel message is the
7826                 // same as the old fee.
7827                 fee_est.fee_est = 500;
7828                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7829                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7830         }
7831
	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		// Force A's dust limit well above B's so the same HTLC amount is dust for
		// one side but not the other.
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});

		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		// All HTLCs are dust on A's commitment, so the fee must match a 0-HTLC commitment tx.
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		// B's lower dust limit means all three HTLCs (two pending + the candidate) count as non-dust.
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}
7916
	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		// Note: 253 below is the channel feerate (sat/kW), matching `fee_est` here.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// Flip perspective: repeat the checks against the counterparty's commitment,
		// which uses the counterparty's dust limit.
		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
7963
	#[test]
	fn channel_reestablish_no_updates() {
		// Open a channel, disconnect both sides with no updates in flight, and check
		// that each side's `channel_reestablish` message carries sane commitment numbers.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		// No commitment secret has been revoked yet, so the field is all-zero.
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
8019
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		// Verify that `holder_max_htlc_value_in_flight_msat` is derived from the configured
		// `max_inbound_htlc_value_in_flight_percent_of_channel`, clamped to [1%, 100%], for
		// both outbound and inbound channel construction.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
8092
8093         #[test]
8094         fn test_configured_holder_selected_channel_reserve_satoshis() {
8095
8096                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8097                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8098                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8099
8100                 // Test with valid but unreasonably high channel reserves
8101                 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8102                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8103                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8104
8105                 // Test with calculated channel reserve less than lower bound
8106                 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8107                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8108
8109                 // Test with invalid channel reserves since sum of both is greater than or equal
8110                 // to channel value
8111                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8112                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8113         }
8114
	/// Opens an outbound channel with `outbound_selected_channel_reserve_perc` configured (via
	/// `their_channel_reserve_proportional_millionths`) and, when the two requested reserves sum
	/// to under 100% of the channel value, accepts it inbound with
	/// `inbound_selected_channel_reserve_perc`, asserting both sides compute the expected
	/// reserves (floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS`). Otherwise asserts the inbound
	/// side rejects the channel.
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());


		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();

		// The reserve the outbound side selects for the counterparty, floored at the minimum.
		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel Negotiations failed
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
8150
8151         #[test]
8152         fn channel_update() {
8153                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8154                 let logger = test_utils::TestLogger::new();
8155                 let secp_ctx = Secp256k1::new();
8156                 let seed = [42; 32];
8157                 let network = Network::Testnet;
8158                 let best_block = BestBlock::from_network(network);
8159                 let chain_hash = ChainHash::using_genesis_block(network);
8160                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8161
8162                 // Create Node A's channel pointing to Node B's pubkey
8163                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8164                 let config = UserConfig::default();
8165                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8166
8167                 // Create Node B's channel by receiving Node A's open_channel message
8168                 // Make sure A's dust limit is as we expect.
8169                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8170                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8171                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8172
8173                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8174                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8175                 accept_channel_msg.dust_limit_satoshis = 546;
8176                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8177                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8178
8179                 // Node A --> Node B: funding created
8180                 let output_script = node_a_chan.context.get_funding_redeemscript();
8181                 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8182                         value: 10000000, script_pubkey: output_script.clone(),
8183                 }]};
8184                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8185                 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8186                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8187
8188                 // Node B --> Node A: funding signed
8189                 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8190
8191                 // Make sure that receiving a channel update will update the Channel as expected.
8192                 let update = ChannelUpdate {
8193                         contents: UnsignedChannelUpdate {
8194                                 chain_hash,
8195                                 short_channel_id: 0,
8196                                 timestamp: 0,
8197                                 flags: 0,
8198                                 cltv_expiry_delta: 100,
8199                                 htlc_minimum_msat: 5,
8200                                 htlc_maximum_msat: MAX_VALUE_MSAT,
8201                                 fee_base_msat: 110,
8202                                 fee_proportional_millionths: 11,
8203                                 excess_data: Vec::new(),
8204                         },
8205                         signature: Signature::from(unsafe { FFISignature::new() })
8206                 };
8207                 assert!(node_a_chan.channel_update(&update).unwrap());
8208
8209                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8210                 // change our official htlc_minimum_msat.
8211                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8212                 match node_a_chan.context.counterparty_forwarding_info() {
8213                         Some(info) => {
8214                                 assert_eq!(info.cltv_expiry_delta, 100);
8215                                 assert_eq!(info.fee_base_msat, 110);
8216                                 assert_eq!(info.fee_proportional_millionths, 11);
8217                         },
8218                         None => panic!("expected counterparty forwarding info to be Some")
8219                 }
8220
8221                 assert!(!node_a_chan.channel_update(&update).unwrap());
8222         }
8223
8224         #[cfg(feature = "_test_vectors")]
8225         #[test]
8226         fn outbound_commitment_test() {
8227                 use bitcoin::util::sighash;
8228                 use bitcoin::consensus::encode::serialize;
8229                 use bitcoin::blockdata::transaction::EcdsaSighashType;
8230                 use bitcoin::hashes::hex::FromHex;
8231                 use bitcoin::hash_types::Txid;
8232                 use bitcoin::secp256k1::Message;
8233                 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
8234                 use crate::ln::PaymentPreimage;
8235                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8236                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8237                 use crate::util::logger::Logger;
8238                 use crate::sync::Arc;
8239
8240                 // Test vectors from BOLT 3 Appendices C and F (anchors):
8241                 let feeest = TestFeeEstimator{fee_est: 15000};
8242                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8243                 let secp_ctx = Secp256k1::new();
8244
8245                 let mut signer = InMemorySigner::new(
8246                         &secp_ctx,
8247                         SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8248                         SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8249                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8250                         SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8251                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8252
8253                         // These aren't set in the test vectors:
8254                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8255                         10_000_000,
8256                         [0; 32],
8257                         [0; 32],
8258                 );
8259
8260                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8261                                 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8262                 let keys_provider = Keys { signer: signer.clone() };
8263
8264                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8265                 let mut config = UserConfig::default();
8266                 config.channel_handshake_config.announced_channel = false;
8267                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8268                 chan.context.holder_dust_limit_satoshis = 546;
8269                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8270
8271                 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8272
8273                 let counterparty_pubkeys = ChannelPublicKeys {
8274                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8275                         revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8276                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8277                         delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8278                         htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8279                 };
8280                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8281                         CounterpartyChannelTransactionParameters {
8282                                 pubkeys: counterparty_pubkeys.clone(),
8283                                 selected_contest_delay: 144
8284                         });
8285                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8286                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8287
8288                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8289                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8290
8291                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8292                            hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8293
8294                 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8295                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8296
8297                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8298                 // derived from a commitment_seed, so instead we copy it here and call
8299                 // build_commitment_transaction.
8300                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8301                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8302                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8303                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8304                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8305
8306                 macro_rules! test_commitment {
8307                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8308                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8309                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8310                         };
8311                 }
8312
8313                 macro_rules! test_commitment_with_anchors {
8314                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8315                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8316                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8317                         };
8318                 }
8319
8320                 macro_rules! test_commitment_common {
8321                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8322                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8323                         } ) => { {
8324                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8325                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8326
8327                                         let htlcs = commitment_stats.htlcs_included.drain(..)
8328                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8329                                                 .collect();
8330                                         (commitment_stats.tx, htlcs)
8331                                 };
8332                                 let trusted_tx = commitment_tx.trust();
8333                                 let unsigned_tx = trusted_tx.built_transaction();
8334                                 let redeemscript = chan.context.get_funding_redeemscript();
8335                                 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8336                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8337                                 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8338                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8339
8340                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8341                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8342                                 let mut counterparty_htlc_sigs = Vec::new();
8343                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8344                                 $({
8345                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8346                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8347                                         counterparty_htlc_sigs.push(remote_signature);
8348                                 })*
8349                                 assert_eq!(htlcs.len(), per_htlc.len());
8350
8351                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
8352                                         commitment_tx.clone(),
8353                                         counterparty_signature,
8354                                         counterparty_htlc_sigs,
8355                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8356                                         chan.context.counterparty_funding_pubkey()
8357                                 );
8358                                 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8359                                 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8360
8361                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
8362                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8363                                 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8364
8365                                 // ((htlc, counterparty_sig), (index, holder_sig))
8366                                 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8367
8368                                 $({
8369                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
8370                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8371
8372                                         let ref htlc = htlcs[$htlc_idx];
8373                                         let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8374                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8375                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8376                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8377                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8378                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8379                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8380
8381                                         let mut preimage: Option<PaymentPreimage> = None;
8382                                         if !htlc.offered {
8383                                                 for i in 0..5 {
8384                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8385                                                         if out == htlc.payment_hash {
8386                                                                 preimage = Some(PaymentPreimage([i; 32]));
8387                                                         }
8388                                                 }
8389
8390                                                 assert!(preimage.is_some());
8391                                         }
8392
8393                                         let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8394                                         let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8395                                                 channel_derivation_parameters: ChannelDerivationParameters {
8396                                                         value_satoshis: chan.context.channel_value_satoshis,
8397                                                         keys_id: chan.context.channel_keys_id,
8398                                                         transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8399                                                 },
8400                                                 commitment_txid: trusted_tx.txid(),
8401                                                 per_commitment_number: trusted_tx.commitment_number(),
8402                                                 per_commitment_point: trusted_tx.per_commitment_point(),
8403                                                 feerate_per_kw: trusted_tx.feerate_per_kw(),
8404                                                 htlc: htlc.clone(),
8405                                                 preimage: preimage.clone(),
8406                                                 counterparty_sig: *htlc_counterparty_sig,
8407                                         }, &secp_ctx).unwrap();
8408                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8409                                         assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8410
8411                                         let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8412                                         assert_eq!(signature, htlc_holder_sig, "htlc sig");
8413                                         let trusted_tx = holder_commitment_tx.trust();
8414                                         htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8415                                         log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8416                                         assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8417                                 })*
8418                                 assert!(htlc_counterparty_sig_iter.next().is_none());
8419                         } }
8420                 }
8421
8422                 // anchors: simple commitment tx with no HTLCs and single anchor
8423                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8424                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8425                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8426
8427                 // simple commitment tx with no HTLCs
8428                 chan.context.value_to_self_msat = 7000000000;
8429
8430                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8431                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8432                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8433
8434                 // anchors: simple commitment tx with no HTLCs
8435                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8436                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8437                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8438
8439                 chan.context.pending_inbound_htlcs.push({
8440                         let mut out = InboundHTLCOutput{
8441                                 htlc_id: 0,
8442                                 amount_msat: 1000000,
8443                                 cltv_expiry: 500,
8444                                 payment_hash: PaymentHash([0; 32]),
8445                                 state: InboundHTLCState::Committed,
8446                         };
8447                         out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8448                         out
8449                 });
8450                 chan.context.pending_inbound_htlcs.push({
8451                         let mut out = InboundHTLCOutput{
8452                                 htlc_id: 1,
8453                                 amount_msat: 2000000,
8454                                 cltv_expiry: 501,
8455                                 payment_hash: PaymentHash([0; 32]),
8456                                 state: InboundHTLCState::Committed,
8457                         };
8458                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8459                         out
8460                 });
8461                 chan.context.pending_outbound_htlcs.push({
8462                         let mut out = OutboundHTLCOutput{
8463                                 htlc_id: 2,
8464                                 amount_msat: 2000000,
8465                                 cltv_expiry: 502,
8466                                 payment_hash: PaymentHash([0; 32]),
8467                                 state: OutboundHTLCState::Committed,
8468                                 source: HTLCSource::dummy(),
8469                                 skimmed_fee_msat: None,
8470                         };
8471                         out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8472                         out
8473                 });
8474                 chan.context.pending_outbound_htlcs.push({
8475                         let mut out = OutboundHTLCOutput{
8476                                 htlc_id: 3,
8477                                 amount_msat: 3000000,
8478                                 cltv_expiry: 503,
8479                                 payment_hash: PaymentHash([0; 32]),
8480                                 state: OutboundHTLCState::Committed,
8481                                 source: HTLCSource::dummy(),
8482                                 skimmed_fee_msat: None,
8483                         };
8484                         out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8485                         out
8486                 });
8487                 chan.context.pending_inbound_htlcs.push({
8488                         let mut out = InboundHTLCOutput{
8489                                 htlc_id: 4,
8490                                 amount_msat: 4000000,
8491                                 cltv_expiry: 504,
8492                                 payment_hash: PaymentHash([0; 32]),
8493                                 state: InboundHTLCState::Committed,
8494                         };
8495                         out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8496                         out
8497                 });
8498
8499                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8500                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8501                 chan.context.feerate_per_kw = 0;
8502
8503                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8504                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8505                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8506
8507                                   { 0,
8508                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8509                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8510                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8511
8512                                   { 1,
8513                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8514                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8515                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8516
8517                                   { 2,
8518                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8519                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8520                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8521
8522                                   { 3,
8523                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8524                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8525                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8526
8527                                   { 4,
8528                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8529                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8530                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8531                 } );
8532
8533                 // commitment tx with seven outputs untrimmed (maximum feerate)
8534                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8535                 chan.context.feerate_per_kw = 647;
8536
8537                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8538                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8539                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8540
8541                                   { 0,
8542                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8543                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8544                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8545
8546                                   { 1,
8547                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8548                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8549                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8550
8551                                   { 2,
8552                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8553                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8554                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8555
8556                                   { 3,
8557                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8558                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8559                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8560
8561                                   { 4,
8562                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8563                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8564                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8565                 } );
8566
8567                 // commitment tx with six outputs untrimmed (minimum feerate)
8568                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8569                 chan.context.feerate_per_kw = 648;
8570
8571                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8572                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8573                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8574
8575                                   { 0,
8576                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8577                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8578                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8579
8580                                   { 1,
8581                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8582                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8583                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8584
8585                                   { 2,
8586                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8587                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8588                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8589
8590                                   { 3,
8591                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8592                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8593                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8594                 } );
8595
8596                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8597                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8598                 chan.context.feerate_per_kw = 645;
8599                 chan.context.holder_dust_limit_satoshis = 1001;
8600
8601                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8602                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8603                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8604
8605                                   { 0,
8606                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8607                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8608                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8609
8610                                   { 1,
8611                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8612                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8613                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8614
8615                                   { 2,
8616                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8617                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8618                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8619
8620                                   { 3,
8621                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8622                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8623                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8624                 } );
8625
8626                 // commitment tx with six outputs untrimmed (maximum feerate)
8627                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8628                 chan.context.feerate_per_kw = 2069;
8629                 chan.context.holder_dust_limit_satoshis = 546;
8630
8631                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8632                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8633                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8634
8635                                   { 0,
8636                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8637                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8638                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8639
8640                                   { 1,
8641                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8642                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8643                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8644
8645                                   { 2,
8646                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8647                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8648                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8649
8650                                   { 3,
8651                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8652                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8653                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8654                 } );
8655
8656                 // commitment tx with five outputs untrimmed (minimum feerate)
                     // test_commitment! takes (counterparty commitment sig, holder commitment sig,
                     // expected serialized commitment tx hex), then a list of per-HTLC entries of
                     // (HTLC output index, counterparty HTLC sig, holder HTLC sig, expected
                     // serialized HTLC-claim tx hex). The vector names and feerates appear to
                     // follow the BOLT #3 Appendix C test vectors — confirm against the spec
                     // before changing any hex below; the data is byte-exact by design.
8657                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8658                 chan.context.feerate_per_kw = 2070;
8659
8660                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8661                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8662                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8663
8664                                   { 0,
8665                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8666                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8667                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8668
8669                                   { 1,
8670                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8671                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8672                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8673
8674                                   { 2,
8675                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8676                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8677                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" },
8678                 } );
8679
8680                 // commitment tx with five outputs untrimmed (maximum feerate)
                     // NOTE(review): 2194 is the last feerate at which five outputs survive
                     // untrimmed — the neighboring vector at 2195 has only four outputs.
8681                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8682                 chan.context.feerate_per_kw = 2194;
8683
8684                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8685                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8686                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8687
8688                                   { 0,
8689                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8690                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8691                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8692
8693                                   { 1,
8694                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8695                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8696                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8697
8698                                   { 2,
8699                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8700                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8701                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" },
8702                 } );
8703
8704                 // commitment tx with four outputs untrimmed (minimum feerate)
                     // One sat/kw above the five-output maximum (2194): at 2195 the larger
                     // commitment fee leaves only four untrimmed outputs.
8705                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8706                 chan.context.feerate_per_kw = 2195;
8707
8708                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8709                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8710                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8711
8712                                   { 0,
8713                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8714                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8715                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8716
8717                                   { 1,
8718                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8719                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8720                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8721                 } );
8722
8723                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
                     // Anchors variant: temporarily switch to the zero-HTLC-fee anchors channel
                     // type, saving the prior type first so later (non-anchors) vectors can
                     // restore it. Per the vector name, the raised holder dust limit (2001 sat)
                     // is the lever here rather than feerate — presumably it trims down to four
                     // outputs; confirm against the spec's trimming rules.
8724                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8725                 chan.context.feerate_per_kw = 2185;
8726                 chan.context.holder_dust_limit_satoshis = 2001;
8727                 let cached_channel_type = chan.context.channel_type;
8728                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8729
8730                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8731                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8732                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8733
8734                                   { 0,
8735                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8736                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8737                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8738
8739                                   { 1,
8740                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8741                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8742                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8743                 } );
8744
8745                 // commitment tx with four outputs untrimmed (maximum feerate)
                     // Restore the cached (non-anchors) channel type and put the holder dust
                     // limit back to 546 sat so this vector is driven purely by feerate again.
8746                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8747                 chan.context.feerate_per_kw = 3702;
8748                 chan.context.holder_dust_limit_satoshis = 546;
8749                 chan.context.channel_type = cached_channel_type.clone();
8750
8751                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8752                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8753                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8754
8755                                   { 0,
8756                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8757                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8758                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8759
8760                                   { 1,
8761                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8762                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8763                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8764                 } );
8765
8766                 // commitment tx with three outputs untrimmed (minimum feerate)
                     // One sat/kw above the four-output maximum (3702): at 3703 another HTLC
                     // output is trimmed, leaving three outputs and a single HTLC entry.
8767                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8768                 chan.context.feerate_per_kw = 3703;
8769
8770                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8771                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8772                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8773
8774                                   { 0,
8775                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8776                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8777                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8778                 } );
8779
8780                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
                     // Anchors variant again: switch the channel type to the zero-HTLC-fee
                     // anchors type for this single vector. Per the vector name, the raised
                     // holder dust limit (3001 sat) is the trimming lever here — verify
                     // against the spec's dust/trimming rules before editing.
8781                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8782                 chan.context.feerate_per_kw = 3687;
8783                 chan.context.holder_dust_limit_satoshis = 3001;
8784                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8785
8786                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8787                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8788                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8789
8790                                   { 0,
8791                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8792                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8793                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8794                 } );
8795
8796                 // commitment tx with three outputs untrimmed (maximum feerate)
8797                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8798                 chan.context.feerate_per_kw = 4914;
8799                 chan.context.holder_dust_limit_satoshis = 546;
8800                 chan.context.channel_type = cached_channel_type.clone();
8801
8802                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8803                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8804                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8805
8806                                   { 0,
8807                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8808                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8809                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8810                 } );
8811
8812                 // commitment tx with two outputs untrimmed (minimum feerate)
8813                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8814                 chan.context.feerate_per_kw = 4915;
8815                 chan.context.holder_dust_limit_satoshis = 546;
8816
8817                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8818                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8819                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8820
8821                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8822                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8823                 chan.context.feerate_per_kw = 4894;
8824                 chan.context.holder_dust_limit_satoshis = 4001;
8825                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8826
8827                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8828                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8829                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8830
8831                 // commitment tx with two outputs untrimmed (maximum feerate)
8832                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8833                 chan.context.feerate_per_kw = 9651180;
8834                 chan.context.holder_dust_limit_satoshis = 546;
8835                 chan.context.channel_type = cached_channel_type.clone();
8836
8837                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8838                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8839                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8840
8841                 // commitment tx with one output untrimmed (minimum feerate)
8842                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8843                 chan.context.feerate_per_kw = 9651181;
8844
8845                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8846                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8847                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8848
8849                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8850                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8851                 chan.context.feerate_per_kw = 6216010;
8852                 chan.context.holder_dust_limit_satoshis = 4001;
8853                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8854
8855                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8856                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8857                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8858
8859                 // commitment tx with fee greater than funder amount
8860                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8861                 chan.context.feerate_per_kw = 9651936;
8862                 chan.context.holder_dust_limit_satoshis = 546;
8863                 chan.context.channel_type = cached_channel_type;
8864
8865                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8866                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8867                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8868
8869                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8870                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8871                 chan.context.feerate_per_kw = 253;
8872                 chan.context.pending_inbound_htlcs.clear();
8873                 chan.context.pending_inbound_htlcs.push({
8874                         let mut out = InboundHTLCOutput{
8875                                 htlc_id: 1,
8876                                 amount_msat: 2000000,
8877                                 cltv_expiry: 501,
8878                                 payment_hash: PaymentHash([0; 32]),
8879                                 state: InboundHTLCState::Committed,
8880                         };
8881                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8882                         out
8883                 });
8884                 chan.context.pending_outbound_htlcs.clear();
8885                 chan.context.pending_outbound_htlcs.push({
8886                         let mut out = OutboundHTLCOutput{
8887                                 htlc_id: 6,
8888                                 amount_msat: 5000001,
8889                                 cltv_expiry: 506,
8890                                 payment_hash: PaymentHash([0; 32]),
8891                                 state: OutboundHTLCState::Committed,
8892                                 source: HTLCSource::dummy(),
8893                                 skimmed_fee_msat: None,
8894                         };
8895                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8896                         out
8897                 });
8898                 chan.context.pending_outbound_htlcs.push({
8899                         let mut out = OutboundHTLCOutput{
8900                                 htlc_id: 5,
8901                                 amount_msat: 5000000,
8902                                 cltv_expiry: 505,
8903                                 payment_hash: PaymentHash([0; 32]),
8904                                 state: OutboundHTLCState::Committed,
8905                                 source: HTLCSource::dummy(),
8906                                 skimmed_fee_msat: None,
8907                         };
8908                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8909                         out
8910                 });
8911
8912                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8913                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8914                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8915
8916                                   { 0,
8917                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8918                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8919                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8920                                   { 1,
8921                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8922                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8923                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8924                                   { 2,
8925                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8926                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8927                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8928                 } );
8929
8930                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8931                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8932                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8933                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8934
8935                                   { 0,
8936                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8937                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8938                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8939                                   { 1,
8940                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8941                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8942                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8943                                   { 2,
8944                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8945                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8946                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8947                 } );
8948         }
8949
8950         #[test]
8951         fn test_per_commitment_secret_gen() {
8952                 // Test vectors from BOLT 3 Appendix D:
8953
8954                 let mut seed = [0; 32];
8955                 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8956                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8957                            hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
8958
8959                 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8960                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8961                            hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
8962
8963                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8964                            hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8965
8966                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8967                            hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
8968
8969                 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8970                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8971                            hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8972         }
8973
8974         #[test]
8975         fn test_key_derivation() {
8976                 // Test vectors from BOLT 3 Appendix E:
8977                 let secp_ctx = Secp256k1::new();
8978
8979                 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8980                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8981
8982                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8983                 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8984
8985                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8986                 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
8987
8988                 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8989                                 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
8990
8991                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8992                                 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
8993
8994                 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8995                                 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
8996
8997                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8998                                 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8999         }
9000
9001         #[test]
9002         fn test_zero_conf_channel_type_support() {
9003                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9004                 let secp_ctx = Secp256k1::new();
9005                 let seed = [42; 32];
9006                 let network = Network::Testnet;
9007                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9008                 let logger = test_utils::TestLogger::new();
9009
9010                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9011                 let config = UserConfig::default();
9012                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9013                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
9014
9015                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9016                 channel_type_features.set_zero_conf_required();
9017
9018                 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9019                 open_channel_msg.channel_type = Some(channel_type_features);
9020                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9021                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9022                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9023                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9024                 assert!(res.is_ok());
9025         }
9026
9027         #[test]
9028         fn test_supports_anchors_zero_htlc_tx_fee() {
9029                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9030                 // resulting `channel_type`.
9031                 let secp_ctx = Secp256k1::new();
9032                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9033                 let network = Network::Testnet;
9034                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9035                 let logger = test_utils::TestLogger::new();
9036
9037                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9038                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9039
9040                 let mut config = UserConfig::default();
9041                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9042
9043                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9044                 // need to signal it.
9045                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9046                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9047                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9048                         &config, 0, 42
9049                 ).unwrap();
9050                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9051
9052                 let mut expected_channel_type = ChannelTypeFeatures::empty();
9053                 expected_channel_type.set_static_remote_key_required();
9054                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9055
9056                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9057                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9058                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9059                 ).unwrap();
9060
9061                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9062                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9063                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9064                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9065                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9066                 ).unwrap();
9067
9068                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9069                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9070         }
9071
9072         #[test]
9073         fn test_rejects_implicit_simple_anchors() {
9074                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9075                 // each side's `InitFeatures`, it is rejected.
9076                 let secp_ctx = Secp256k1::new();
9077                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9078                 let network = Network::Testnet;
9079                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9080                 let logger = test_utils::TestLogger::new();
9081
9082                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9083                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9084
9085                 let config = UserConfig::default();
9086
9087                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9088                 let static_remote_key_required: u64 = 1 << 12;
9089                 let simple_anchors_required: u64 = 1 << 20;
9090                 let raw_init_features = static_remote_key_required | simple_anchors_required;
9091                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9092
9093                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9094                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9095                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9096                 ).unwrap();
9097
9098                 // Set `channel_type` to `None` to force the implicit feature negotiation.
9099                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9100                 open_channel_msg.channel_type = None;
9101
9102                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9103                 // `static_remote_key`, it will fail the channel.
9104                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9105                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9106                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9107                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9108                 );
9109                 assert!(channel_b.is_err());
9110         }
9111
9112         #[test]
9113         fn test_rejects_simple_anchors_channel_type() {
9114                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9115                 // it is rejected.
9116                 let secp_ctx = Secp256k1::new();
9117                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9118                 let network = Network::Testnet;
9119                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9120                 let logger = test_utils::TestLogger::new();
9121
9122                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9123                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9124
9125                 let config = UserConfig::default();
9126
9127                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9128                 let static_remote_key_required: u64 = 1 << 12;
9129                 let simple_anchors_required: u64 = 1 << 20;
9130                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9131                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9132                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9133                 assert!(!simple_anchors_init.requires_unknown_bits());
9134                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9135
9136                 // First, we'll try to open a channel between A and B where A requests a channel type for
9137                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9138                 // B as it's not supported by LDK.
9139                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9140                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9141                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9142                 ).unwrap();
9143
9144                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9145                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9146
9147                 let res = InboundV1Channel::<&TestKeysInterface>::new(
9148                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9149                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9150                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9151                 );
9152                 assert!(res.is_err());
9153
9154                 // Then, we'll try to open another channel where A requests a channel type for
9155                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9156                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9157                 // LDK.
9158                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9159                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9160                         10000000, 100000, 42, &config, 0, 42
9161                 ).unwrap();
9162
9163                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9164
9165                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9166                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9167                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9168                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9169                 ).unwrap();
9170
9171                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9172                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9173
9174                 let res = channel_a.accept_channel(
9175                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9176                 );
9177                 assert!(res.is_err());
9178         }
9179
	#[test]
	fn test_waiting_for_batch() {
		// Tests the funding flow for a channel that is part of a batch funding transaction:
		// even with `trust_own_funding_0conf` set, our own `channel_ready` and the funding
		// broadcast are withheld (ChannelState::WaitingForBatch) until `set_batch_ready` is
		// called, while a 0conf `channel_ready` from the counterparty can still be received.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		// Complete the open_channel/accept_channel handshake.
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		// The second (unrelated) output marks this as a multi-channel batch transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: PackedLockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		// `true` here flags the channel as part of a batch (is_batch_funding).
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(),
			funding_outpoint,
			true,
			&&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg,
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		// Node b trusts its own 0conf funding, so its updates should include channel_ready.
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg,
			best_block,
			&&keys_provider,
			&&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		// WaitingForBatch persists; only the counterparty-side readiness bit is added.
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		// With the batch flag cleared, our own channel_ready can finally be produced.
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
9321 }