ce15eb2743787d0236fa03537e072f3559898e31
[rust-lightning] / channel.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script,Builder};
12 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
13 use bitcoin::util::sighash;
14 use bitcoin::consensus::encode;
15
16 use bitcoin::hashes::Hash;
17 use bitcoin::hashes::sha256::Hash as Sha256;
18 use bitcoin::hashes::sha256d::Hash as Sha256d;
19 use bitcoin::hash_types::{Txid, BlockHash};
20
21 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
22 use bitcoin::secp256k1::{PublicKey,SecretKey};
23 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
24 use bitcoin::secp256k1;
25
26 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
27 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
28 use crate::ln::msgs;
29 use crate::ln::msgs::DecodeError;
30 use crate::ln::script::{self, ShutdownScript};
31 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
32 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
33 use crate::ln::chan_utils;
34 use crate::ln::onion_utils::HTLCFailReason;
35 use crate::chain::BestBlock;
36 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
37 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
38 use crate::chain::transaction::{OutPoint, TransactionData};
39 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
40 use crate::events::ClosureReason;
41 use crate::routing::gossip::NodeId;
42 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
43 use crate::util::logger::Logger;
44 use crate::util::errors::APIError;
45 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
46 use crate::util::scid_utils::scid_from_parts;
47
48 use crate::io;
49 use crate::prelude::*;
50 use core::{cmp,mem,fmt};
51 use core::convert::TryInto;
52 use core::ops::Deref;
53 #[cfg(any(test, fuzzing, debug_assertions))]
54 use crate::sync::Mutex;
55 use bitcoin::hashes::hex::ToHex;
56 use crate::sign::type_resolver::ChannelSignerType;
57
/// A snapshot of a channel's balance- and limit-related values, exposed for unit tests to
/// assert against.
#[cfg(test)]
pub struct ChannelValueStat {
	// The portion of the channel's funds currently owned by us, in msat.
	pub value_to_self_msat: u64,
	// The total channel capacity, in msat.
	pub channel_value_msat: u64,
	// The reserve value, in msat (presumably the counterparty-required reserve on our side —
	// confirm against the code populating this struct).
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	// Total value of outbound HTLCs currently deferred in the holding cell, in msat.
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
69
/// A snapshot of the balances available on a channel, from the holder's point of view.
/// All values are in millisatoshis.
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
82
/// The state of an in-flight `update_fee`, mirroring (a subset of) the HTLC state machine in
/// [`InboundHTLCState`]. Fee updates are never "forwarded", so fewer states are needed.
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	/// The counterparty sent us an `update_fee`; mirrors `InboundHTLCState::RemoteAnnounced`.
	RemoteAnnounced,
	/// The new feerate was included in a received `commitment_signed`, but we're waiting on the
	/// counterparty's revocation of their prior state before announcing it ourselves; mirrors
	/// `InboundHTLCState::AwaitingRemoteRevokeToAnnounce`.
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
97
/// The reason an inbound HTLC is being removed from the channel; carried in
/// [`InboundHTLCState::LocalRemoved`].
enum InboundHTLCRemovalReason {
	/// We're relaying a failure from further down the path, carrying the encrypted onion error
	/// packet to pass back to the sender.
	FailRelay(msgs::OnionErrorPacket),
	/// The HTLC's onion was malformed. Carries a 32-byte hash and a u16 failure code —
	/// presumably the sha256-of-onion and code for `update_fail_malformed_htlc`; confirm at
	/// the use site.
	FailMalformed(([u8; 32], u16)),
	/// We're fulfilling the HTLC with the given payment preimage.
	Fulfill(PaymentPreimage),
}
103
/// The state of an inbound HTLC as it moves through the commitment-signed/revoke-and-ack update
/// dance with our counterparty (see the per-variant docs for the full walkthrough).
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	/// This HTLC is an irrevocable part of the channel state on both sides; it may now be
	/// forwarded onwards and/or removed.
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
157
/// An inbound HTLC on this channel, tracked through the states of [`InboundHTLCState`].
struct InboundHTLCOutput {
	/// The ID of this HTLC, as set in the counterparty's update_add_htlc.
	htlc_id: u64,
	/// The value of this HTLC, in msat.
	amount_msat: u64,
	/// The block height at which this HTLC expires.
	cltv_expiry: u32,
	/// The hash whose preimage fulfills this HTLC.
	payment_hash: PaymentHash,
	/// Where this HTLC currently is in the commitment/revocation dance.
	state: InboundHTLCState,
}
165
/// The state of an outbound HTLC as it moves through the commitment-signed/revoke-and-ack update
/// dance with our counterparty; the outbound counterpart of [`InboundHTLCState`].
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	/// This HTLC is an irrevocable part of the channel state on both sides.
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
197
/// The outcome of a removed (fulfilled or failed) outbound HTLC, carried through the removal
/// states of [`OutboundHTLCState`].
#[derive(Clone)]
enum OutboundHTLCOutcome {
	/// The HTLC was fulfilled by the counterparty.
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	/// The HTLC was failed by the counterparty, with the given reason.
	Failure(HTLCFailReason),
}
204
205 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
206         fn from(o: Option<HTLCFailReason>) -> Self {
207                 match o {
208                         None => OutboundHTLCOutcome::Success(None),
209                         Some(r) => OutboundHTLCOutcome::Failure(r)
210                 }
211         }
212 }
213
214 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
215         fn into(self) -> Option<&'a HTLCFailReason> {
216                 match self {
217                         OutboundHTLCOutcome::Success(_) => None,
218                         OutboundHTLCOutcome::Failure(ref r) => Some(r)
219                 }
220         }
221 }
222
/// An outbound HTLC on this channel, tracked through the states of [`OutboundHTLCState`].
struct OutboundHTLCOutput {
	/// The ID of this HTLC, as set in our update_add_htlc.
	htlc_id: u64,
	/// The value of this HTLC, in msat.
	amount_msat: u64,
	/// The block height at which this HTLC expires.
	cltv_expiry: u32,
	/// The hash whose preimage fulfills this HTLC.
	payment_hash: PaymentHash,
	/// Where this HTLC currently is in the commitment/revocation dance.
	state: OutboundHTLCState,
	/// Where this HTLC came from (a forwarded HTLC or our own outbound payment), so we can fail
	/// or claim backwards once it resolves.
	source: HTLCSource,
	// The extra fee we're skimming off the top of this HTLC.
	skimmed_fee_msat: Option<u64>,
}
232
/// A channel update deferred into the holding cell because we're awaiting the counterparty's
/// revoke_and_ack; applied once that arrives.
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	/// A deferred outbound HTLC add.
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	/// A deferred claim of an inbound HTLC with the given preimage.
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	/// A deferred failure of an inbound HTLC, relaying the given onion error packet backwards.
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
254
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
///
/// All discriminants are written as `1 << n` to make it obvious each state/flag occupies a
/// distinct bit (the previous mix of decimal literals and shifts obscured this).
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 1 << 2,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 1 << 3,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	/// Both sides have exchanged `channel_ready`; the channel is live.
	ChannelReady = 1 << 6,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 1 << 12,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
/// Mask of the shutdown-sent flags for both directions.
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
/// Flags which may be set on more than one of the primary states.
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
/// Every bit which is a flag rather than a primary state.
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
325
/// The highest possible commitment transaction number (2^48 - 1). Commitment numbers are 48 bits
/// wide per BOLT #3, and we count down from this value with each new commitment transaction.
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// Our default limit on the number of pending HTLCs per direction — deliberately well below the
/// protocol maximum of 483 (see [`MAX_HTLCS`]); presumably to bound commitment transaction size.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
329
330 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
331         const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
332         const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
333         if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
334 }
335
// The incremental weight each non-dust HTLC output adds to a commitment transaction. The constant
// is additionally `pub` in test builds so tests can compute expected weights.
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

/// The value, in satoshis, of each anchor output on an anchors-channel commitment transaction.
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
342
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1, i.e. 16,777,215 satoshis (roughly 0.168 BTC).
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
353
/// Total bitcoin supply in satoshis: 21 million coins of 100,000,000 satoshis each.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;
356
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

/// A reasonable implementation-specific safe lower bound on the counterparty's channel reserve,
/// higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
378
379 /// Used to return a simple Error back to ChannelManager. Will get converted to a
380 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
381 /// channel_id in ChannelManager.
382 pub(super) enum ChannelError {
383         Ignore(String),
384         Warn(String),
385         Close(String),
386 }
387
388 impl fmt::Debug for ChannelError {
389         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
390                 match self {
391                         &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
392                         &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
393                         &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
394                 }
395         }
396 }
397
398 impl fmt::Display for ChannelError {
399         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
400                 match self {
401                         &ChannelError::Ignore(ref e) => write!(f, "{}", e),
402                         &ChannelError::Warn(ref e) => write!(f, "{}", e),
403                         &ChannelError::Close(ref e) => write!(f, "{}", e),
404                 }
405         }
406 }
407
/// Unwraps the `Ok` value of a secp256k1 `Result`, converting an `Err` into an early return of
/// `Err(ChannelError::Close($err))` from the enclosing function.
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
416
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
///
/// NOTE(review): the `u8` in the staged variants presumably counts timer ticks spent in the
/// staged state before the announcement is actually made — confirm against
/// `timer_tick_occurred`.
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
433
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
/// This lets us decide whether the message needs to be (re-)sent after a reconnection.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
453
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	/// We offered this HTLC (it's outbound from our perspective).
	LocalOffered,
	/// Our counterparty offered this HTLC (it's inbound from our perspective).
	RemoteOffered,
}
459
/// A struct (not an enum, despite the old comment) gathering stats on pending HTLCs, either
/// inbound or outbound side. All `_msat` values are in millisatoshis.
struct HTLCStats {
	/// The number of pending HTLCs.
	pending_htlcs: u32,
	/// The total value of the pending HTLCs.
	pending_htlcs_value_msat: u64,
	/// Value counting towards our dust exposure on the counterparty's commitment transaction.
	on_counterparty_tx_dust_exposure_msat: u64,
	/// Value counting towards our dust exposure on our own commitment transaction.
	on_holder_tx_dust_exposure_msat: u64,
	/// Total value of HTLC updates currently deferred in the holding cell.
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
469
/// A struct (not an enum, despite the old comment) gathering stats on a commitment transaction,
/// either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
481
482 /// Used when calculating whether we or the remote can afford an additional HTLC.
483 struct HTLCCandidate {
484         amount_msat: u64,
485         origin: HTLCInitiator,
486 }
487
488 impl HTLCCandidate {
489         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
490                 Self {
491                         amount_msat,
492                         origin,
493                 }
494         }
495 }
496
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	/// The claim is new; see [`UpdateFulfillCommitFetch::NewClaim`].
	NewClaim {
		/// The ChannelMonitorUpdate which places the payment preimage in the channel monitor.
		monitor_update: ChannelMonitorUpdate,
		/// The value of the claimed HTLC, in msat.
		htlc_value_msat: u64,
		/// The update_fulfill message to send, if any — presumably `None` when the claim went
		/// to the holding cell instead; confirm at the call site.
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	/// The claim was duplicative; see [`UpdateFulfillCommitFetch::DuplicateClaim`].
	DuplicateClaim {},
}
507
/// The return type of get_update_fulfill_htlc_and_commit; the public counterpart of
/// [`UpdateFulfillFetch`].
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
523
/// The return value of `monitor_updating_restored`: everything that was held back while a
/// `ChannelMonitorUpdate` was pending and should now be acted upon.
pub(super) struct MonitorRestoreUpdates {
	/// A revoke_and_ack to send, if one was held back.
	pub raa: Option<msgs::RevokeAndACK>,
	/// A commitment update to send, if one was held back.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// The order in which the RAA and commitment update should be sent.
	pub order: RAACommitmentOrder,
	/// HTLCs accepted while the monitor update was in flight — presumably (info, htlc_id)
	/// pairs; confirm against the caller.
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	/// HTLCs to fail backwards now that the monitor update completed.
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	/// HTLC claims whose processing can now be finalized.
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	/// A funding transaction which can now be broadcast, if any.
	pub funding_broadcastable: Option<Transaction>,
	/// A channel_ready message to send, if one was held back.
	pub channel_ready: Option<msgs::ChannelReady>,
	/// An announcement_signatures message to send, if one was held back.
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
536
/// The return value of `channel_reestablish`: the messages we should send to get the channel
/// back in sync after a reconnection.
pub(super) struct ReestablishResponses {
	/// A channel_ready to (re-)send, if required.
	pub channel_ready: Option<msgs::ChannelReady>,
	/// A revoke_and_ack to re-send, if the peer missed our last one.
	pub raa: Option<msgs::RevokeAndACK>,
	/// A commitment update to re-send, if the peer missed our last one.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// The order in which the RAA and commitment update should be sent.
	pub order: RAACommitmentOrder,
	/// An announcement_signatures to (re-)send, if required.
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	/// Our shutdown message to (re-)send, if we had previously sent one.
	pub shutdown_msg: Option<msgs::Shutdown>,
}
546
/// The result of a shutdown that should be handled.
/// Marked `#[must_use]` — dropping it would lose the monitor update and the HTLCs that must be
/// failed backwards.
#[must_use]
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
558
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
// The two definitions below must be kept equal; the constant is only `pub` in test and fuzzing
// builds so those harnesses can reference it.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
595
596 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
597 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
598 /// ChannelUpdate prompted by the config update. This value was determined as follows:
599 ///
600 ///   * The expected interval between ticks (1 minute).
601 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
602 ///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
603 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
604 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
605
606 /// The number of ticks that may elapse while we're waiting for a response to a
607 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
608 /// them.
609 ///
610 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
611 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
612
613 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
614 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
615 /// exceeding this age limit will be force-closed and purged from memory.
616 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
617
618 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
619 pub(crate) const COINBASE_MATURITY: u32 = 100;
620
/// A queued [`ChannelMonitorUpdate`], wrapped for (de)serialization.
///
/// See `ChannelContext::blocked_monitor_updates`, which stores these until some external action
/// completes and the `ChannelManager` asks for them.
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
628
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	/// An outbound (initiated by us) V1 channel which has not yet been funded.
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	/// An inbound (initiated by our counterparty) V1 channel which has not yet been funded.
	UnfundedInboundV1(InboundV1Channel<SP>),
	/// A channel which has negotiated funding.
	Funded(Channel<SP>),
}
636
637 impl<'a, SP: Deref> ChannelPhase<SP> where
638         SP::Target: SignerProvider,
639         <SP::Target as SignerProvider>::Signer: ChannelSigner,
640 {
641         pub fn context(&'a self) -> &'a ChannelContext<SP> {
642                 match self {
643                         ChannelPhase::Funded(chan) => &chan.context,
644                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
645                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
646                 }
647         }
648
649         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
650                 match self {
651                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
652                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
653                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
654                 }
655         }
656 }
657
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond by the time this reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from
	/// memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
668
669 impl UnfundedChannelContext {
670         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
671         /// having reached the unfunded channel age limit.
672         ///
673         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
674         pub fn should_expire_unfunded_channel(&mut self) -> bool {
675                 self.unfunded_channel_age_ticks += 1;
676                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
677         }
678 }
679
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	/// Bitfield of `ChannelState` flags tracking this channel's progress through its lifecycle.
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// many tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
943
944 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
	/// Returns the counter used as the timestamp in our latest `channel_update` message.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}
949
	/// Returns the ID of the most recent [`ChannelMonitorUpdate`] generated for this channel.
	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}
953
	/// Returns true if this channel was configured as publicly announced.
	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}
957
	/// Returns true if this channel is outbound, i.e. was initiated (and funded) by us.
	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}
961
	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		// Base forwarding fee, in millisatoshis, from our current channel config.
		self.config.options.forwarding_fee_base_msat
	}
967
	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		// Once the state machine has advanced past `OurInitSent` we must have processed at least
		// one counterparty message. The `STATE_FLAGS` bits are masked off before comparing as
		// they are flags, not part of the ordered handshake progression.
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}
972
973         /// Returns true if this channel is fully established and not known to be closing.
974         /// Allowed in any state (including after shutdown)
975         pub fn is_usable(&self) -> bool {
976                 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
977                 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
978         }
979
980         /// shutdown state returns the state of the channel in its various stages of shutdown
981         pub fn shutdown_state(&self) -> ChannelShutdownState {
982                 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
983                         return ChannelShutdownState::ShutdownComplete;
984                 }
985                 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 &&  self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
986                         return ChannelShutdownState::ShutdownInitiated;
987                 }
988                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
989                         return ChannelShutdownState::ResolvingHTLCs;
990                 }
991                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
992                         return ChannelShutdownState::NegotiatingClosingFee;
993                 }
994                 return ChannelShutdownState::NotShuttingDown;
995         }
996
997         fn closing_negotiation_ready(&self) -> bool {
998                 self.pending_inbound_htlcs.is_empty() &&
999                 self.pending_outbound_htlcs.is_empty() &&
1000                 self.pending_update_fee.is_none() &&
1001                 self.channel_state &
1002                 (BOTH_SIDES_SHUTDOWN_MASK |
1003                         ChannelState::AwaitingRemoteRevoke as u32 |
1004                         ChannelState::PeerDisconnected as u32 |
1005                         ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1006         }
1007
	/// Returns true if this channel is currently available for use. This is a superset of
	/// is_usable() and considers things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		// Usable, and the peer is currently connected.
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}
1014
1015         // Public utilities:
1016
	/// Returns the channel's current ID.
	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}
1020
	/// Returns the `temporary_channel_id` used during channel establishment.
	///
	/// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}
1027
	/// Returns the channel's `minimum_depth`, if set.
	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}
1031
1032         /// Gets the "user_id" value passed into the construction of this channel. It has no special
1033         /// meaning and exists only to allow users to have a persistent identifier of a channel.
1034         pub fn get_user_id(&self) -> u128 {
1035                 self.user_id
1036         }
1037
	/// Gets the channel's type, as negotiated during channel open.
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}
1042
	/// Gets the channel's `short_channel_id`, the on-chain identifier used to refer to this
	/// channel once confirmed.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}
1049
	/// Returns the most recent SCID alias our counterparty offered us for this channel.
	///
	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}
1054
	/// Returns the SCID alias we offered our counterparty for this channel.
	///
	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}
1059
	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		// Debug-build check that the alias has not already been set.
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
1067
	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created. Returns `None` while the funding outpoint is not yet known.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}
1073
	/// Returns the block hash in which our funding transaction was confirmed, or `None` if it is
	/// not yet confirmed.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}
1078
1079         /// Returns the current number of confirmations on the funding transaction.
1080         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1081                 if self.funding_tx_confirmation_height == 0 {
1082                         // We either haven't seen any confirmation yet, or observed a reorg.
1083                         return 0;
1084                 }
1085
1086                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1087         }
1088
	/// Returns the contest delay we selected, from the channel's transaction parameters.
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}
1092
	/// Returns our public keys from the channel's transaction parameters.
	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}
1096
	/// Returns the contest delay our counterparty selected, or `None` if their channel
	/// parameters have not yet been set.
	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}
1101
	/// Returns the counterparty's public keys.
	///
	/// Note the `unwrap`: this panics if the counterparty's channel parameters have not been
	/// set yet, so callers must only invoke this once they are known.
	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}
1105
	/// Returns the node id of our channel counterparty.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}
1110
	/// Returns the minimum HTLC value, in millisatoshis, we will accept.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}
1115
	/// Returns the maximum HTLC value we can have in flight towards us, capped by the channel's
	/// spendable balance; see [`ChannelContext::get_htlc_maximum_msat`].
	///
	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}
1120
1121         /// Allowed in any state (including after shutdown)
1122         pub fn get_announced_htlc_max_msat(&self) -> u64 {
1123                 return cmp::min(
1124                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1125                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1126                         // channel might have been used to route very small values (either by honest users or as DoS).
1127                         self.channel_value_satoshis * 1000 * 9 / 10,
1128
1129                         self.counterparty_max_htlc_value_in_flight_msat
1130                 );
1131         }
1132
	/// Returns the minimum HTLC value, in millisatoshis, our counterparty will accept.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}
1137
	/// Returns the maximum HTLC value the counterparty can have in flight towards them, capped by
	/// the channel's spendable balance; see [`ChannelContext::get_htlc_maximum_msat`].
	///
	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}
1142
1143         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1144                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1145                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1146                         cmp::min(
1147                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1148                                 party_max_htlc_value_in_flight_msat
1149                         )
1150                 })
1151         }
1152
	/// Returns the total value of the channel, in satoshis.
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}
1156
	/// Returns the proportional forwarding fee, in millionths, from our channel config.
	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}
1160
	/// Returns the configured CLTV expiry delta, clamped to at least [`MIN_CLTV_EXPIRY_DELTA`].
	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}
1164
1165         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1166                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1167         where F::Target: FeeEstimator
1168         {
1169                 match self.config.options.max_dust_htlc_exposure {
1170                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1171                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1172                                         ConfirmationTarget::OnChainSweep) as u64;
1173                                 feerate_per_kw.saturating_mul(multiplier)
1174                         },
1175                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1176                 }
1177         }
1178
1179         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1180         pub fn prev_config(&self) -> Option<ChannelConfig> {
1181                 self.prev_config.map(|prev_config| prev_config.0)
1182         }
1183
	// Checks whether we should emit a `ChannelPending` event.
	// True once the funding transaction has been broadcast and the event has not yet been emitted.
	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
		self.is_funding_broadcast() && !self.channel_pending_event_emitted
	}
1188
	// Returns whether we already emitted a `ChannelPending` event.
	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
		self.channel_pending_event_emitted
	}
1193
	// Remembers that we already emitted a `ChannelPending` event, so
	// `should_emit_channel_pending_event` returns false from now on.
	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
		self.channel_pending_event_emitted = true;
	}
1198
	// Checks whether we should emit a `ChannelReady` event.
	// True once the channel is usable and the event has not yet been emitted.
	pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
		self.is_usable() && !self.channel_ready_event_emitted
	}
1203
	// Remembers that we already emitted a `ChannelReady` event, so
	// `should_emit_channel_ready_event` returns false from now on.
	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
		self.channel_ready_event_emitted = true;
	}
1208
1209         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1210         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1211         /// no longer be considered when forwarding HTLCs.
1212         pub fn maybe_expire_prev_config(&mut self) {
1213                 if self.prev_config.is_none() {
1214                         return;
1215                 }
1216                 let prev_config = self.prev_config.as_mut().unwrap();
1217                 prev_config.1 += 1;
1218                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1219                         self.prev_config = None;
1220                 }
1221         }
1222
	/// Returns the current [`ChannelConfig`] applied to the channel.
	/// (Returned by value; the options struct is copied out.)
	pub fn config(&self) -> ChannelConfig {
		self.config.options
	}
1227
1228         /// Updates the channel's config. A bool is returned indicating whether the config update
1229         /// applied resulted in a new ChannelUpdate message.
1230         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1231                 let did_channel_update =
1232                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1233                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1234                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1235                 if did_channel_update {
1236                         self.prev_config = Some((self.config.options, 0));
1237                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1238                         // policy change to propagate throughout the network.
1239                         self.update_time_counter += 1;
1240                 }
1241                 self.config.options = *config;
1242                 did_channel_update
1243         }
1244
1245         /// Returns true if funding_signed was sent/received and the
1246         /// funding transaction has been broadcast if necessary.
1247         pub fn is_funding_broadcast(&self) -> bool {
1248                 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1249                         self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1250         }
1251
	/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
	/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
	/// the transaction. Thus, b will generally be sending a signature over such a transaction to
	/// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to a).
	/// @local is used only to convert relevant internal structures which refer to remote vs local
	/// to decide value of outputs and direction of HTLCs.
	/// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
	/// state may indicate that one peer has informed the other that they'd like to add an HTLC but
	/// have not yet committed it. Such HTLCs will only be included in transactions which are being
	/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
	/// which peer generated this transaction and "to whom" this transaction flows.
	#[inline]
	fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
		where L::Target: Logger
	{
		// HTLCs below the broadcaster's dust limit (plus, for non-anchor channels, the fee of the
		// second-stage HTLC transaction) get no output on the commitment tx; track them separately.
		let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
		let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
		let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);

		let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
		let mut remote_htlc_total_msat = 0;
		let mut local_htlc_total_msat = 0;
		// Net adjustment (msat, signed) to our balance from HTLCs which are resolved on one side
		// but not yet irrevocably committed on both.
		let mut value_to_self_msat_offset = 0;

		// If a fee update is pending, apply the new feerate iff it should be reflected in this
		// commitment — the match arms below mirror the HTLC inclusion criteria.
		let mut feerate_per_kw = self.feerate_per_kw;
		if let Some((feerate, update_state)) = self.pending_update_fee {
			if match update_state {
				// Note that these match the inclusion criteria when scanning
				// pending_inbound_htlcs below.
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
			} {
				feerate_per_kw = feerate;
			}
		}

		log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
			commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
			get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
			&self.channel_id,
			if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);

		macro_rules! get_htlc_in_commitment {
			($htlc: expr, $offered: expr) => {
				HTLCOutputInCommitment {
					offered: $offered,
					amount_msat: $htlc.amount_msat,
					cltv_expiry: $htlc.cltv_expiry,
					payment_hash: $htlc.payment_hash,
					transaction_output_index: None
				}
			}
		}

		// Classifies an HTLC as dust or non-dust from the broadcaster's perspective and records
		// it in the matching list. "Offered" is relative to the broadcaster, hence `== local`.
		macro_rules! add_htlc_output {
			($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
				if $outbound == local { // "offered HTLC output"
					let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				} else {
					let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				}
			}
		}

		for ref htlc in self.pending_inbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
				InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
				InboundHTLCState::Committed => (true, "Committed"),
				InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
			};

			if include {
				add_htlc_output!(htlc, false, None, state_name);
				remote_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match &htlc.state {
					&InboundHTLCState::LocalRemoved(ref reason) => {
						if generated_by_local {
							if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
								// We fulfilled this HTLC but it isn't yet removed from the
								// commitment; count its value towards our balance.
								value_to_self_msat_offset += htlc.amount_msat as i64;
							}
						}
					},
					_ => {},
				}
			}
		}

		// Preimages for outbound HTLCs the remote has claimed, returned in the stats.
		let mut preimages: Vec<PaymentPreimage> = Vec::new();

		for ref htlc in self.pending_outbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
				OutboundHTLCState::Committed => (true, "Committed"),
				OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
			};

			let preimage_opt = match htlc.state {
				OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
				_ => None,
			};

			if let Some(preimage) = preimage_opt {
				preimages.push(preimage);
			}

			if include {
				add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
				local_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match htlc.state {
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
						// The remote claimed this HTLC; its value no longer counts towards us.
						value_to_self_msat_offset -= htlc.amount_msat as i64;
					},
					OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
						if !generated_by_local {
							value_to_self_msat_offset -= htlc.amount_msat as i64;
						}
					},
					_ => {},
				}
			}
		}

		let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
		assert!(value_to_self_msat >= 0);
		// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
		// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
		// "violate" their reserve value by counting those against it. Thus, we have to convert
		// everything to i64 before subtracting as otherwise we can overflow.
		let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
		assert!(value_to_remote_msat >= 0);

		#[cfg(debug_assertions)]
		{
			// Make sure that the to_self/to_remote is always either past the appropriate
			// channel_reserve *or* it is making progress towards it.
			let mut broadcaster_max_commitment_tx_output = if generated_by_local {
				self.holder_max_commitment_tx_output.lock().unwrap()
			} else {
				self.counterparty_max_commitment_tx_output.lock().unwrap()
			};
			debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
			broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
			debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
			broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
		}

		let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
		let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
		// The commitment-tx fee (and both anchor outputs, if present) come out of the funder's side.
		let (value_to_self, value_to_remote) = if self.is_outbound() {
			(value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
		} else {
			(value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
		};

		let mut value_to_a = if local { value_to_self } else { value_to_remote };
		let mut value_to_b = if local { value_to_remote } else { value_to_self };
		let (funding_pubkey_a, funding_pubkey_b) = if local {
			(self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
		} else {
			(self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
		};

		// Trim either balance output entirely if it would fall below the broadcaster's dust limit.
		if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
		} else {
			value_to_a = 0;
		}

		if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
		} else {
			value_to_b = 0;
		}

		let num_nondust_htlcs = included_non_dust_htlcs.len();

		let channel_parameters =
			if local { self.channel_transaction_parameters.as_holder_broadcastable() }
			else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
		let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
		                                                             value_to_a as u64,
		                                                             value_to_b as u64,
		                                                             funding_pubkey_a,
		                                                             funding_pubkey_b,
		                                                             keys.clone(),
		                                                             feerate_per_kw,
		                                                             &mut included_non_dust_htlcs,
		                                                             &channel_parameters
		);
		let mut htlcs_included = included_non_dust_htlcs;
		// The unwrap is safe, because all non-dust HTLCs have been assigned an output index
		htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
		htlcs_included.append(&mut included_dust_htlcs);

		// For the stats, trimmed-to-0 the value in msats accordingly
		// NOTE(review): this compares a msat value scaled *up* by 1000 against a sat-denominated
		// dust limit, which looks like a unit mismatch (expected msat vs. limit * 1000) — confirm
		// this is intended before changing, as it only affects the reported stats.
		value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
		value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };

		CommitmentStats {
			tx,
			feerate_per_kw,
			total_fee_sat,
			num_nondust_htlcs,
			htlcs_included,
			local_balance_msat: value_to_self_msat as u64,
			remote_balance_msat: value_to_remote_msat as u64,
			preimages
		}
	}
1498
1499         #[inline]
1500         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1501         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1502         /// our counterparty!)
1503         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1504         /// TODO Some magic rust shit to compile-time check this?
1505         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1506                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1507                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1508                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1509                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1510
1511                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1512         }
1513
1514         #[inline]
1515         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1516         /// will sign and send to our counterparty.
1517         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1518         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1519                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1520                 //may see payments to it!
1521                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1522                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1523                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1524
1525                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1526         }
1527
	/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
	/// pays to get_funding_redeemscript().to_v0_p2wsh()).
	/// Panics if called before accept_channel/InboundV1Channel::new
	pub fn get_funding_redeemscript(&self) -> Script {
		// Built from our funding pubkey and the counterparty's.
		make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
	}
1534
	// Returns a reference to the counterparty's funding public key (as used when constructing
	// the funding redeemscript).
	fn counterparty_funding_pubkey(&self) -> &PublicKey {
		&self.get_counterparty_pubkeys().funding_pubkey
	}
1538
	/// Returns the channel's current feerate, in satoshis per 1000 weight units.
	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
		self.feerate_per_kw
	}
1542
1543         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1544                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1545                 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1546                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1547                 // more dust balance if the feerate increases when we have several HTLCs pending
1548                 // which are near the dust limit.
1549                 let mut feerate_per_kw = self.feerate_per_kw;
1550                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1551                 // potential feerate updates coming soon.
1552                 if let Some((feerate, _)) = self.pending_update_fee {
1553                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1554                 }
1555                 if let Some(feerate) = outbound_feerate_update {
1556                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1557                 }
1558                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1559         }
1560
	/// Get forwarding information for the counterparty.
	/// Returns a clone of the stored info, or `None` if none is set.
	pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
		self.counterparty_forwarding_info.clone()
	}
1565
1566         /// Returns a HTLCStats about inbound pending htlcs
1567         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1568                 let context = self;
1569                 let mut stats = HTLCStats {
1570                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1571                         pending_htlcs_value_msat: 0,
1572                         on_counterparty_tx_dust_exposure_msat: 0,
1573                         on_holder_tx_dust_exposure_msat: 0,
1574                         holding_cell_msat: 0,
1575                         on_holder_tx_holding_cell_htlcs_count: 0,
1576                 };
1577
1578                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1579                         (0, 0)
1580                 } else {
1581                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1582                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1583                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1584                 };
1585                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1586                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1587                 for ref htlc in context.pending_inbound_htlcs.iter() {
1588                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1589                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1590                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1591                         }
1592                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1593                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1594                         }
1595                 }
1596                 stats
1597         }
1598
1599         /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1600         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1601                 let context = self;
1602                 let mut stats = HTLCStats {
1603                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1604                         pending_htlcs_value_msat: 0,
1605                         on_counterparty_tx_dust_exposure_msat: 0,
1606                         on_holder_tx_dust_exposure_msat: 0,
1607                         holding_cell_msat: 0,
1608                         on_holder_tx_holding_cell_htlcs_count: 0,
1609                 };
1610
1611                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1612                         (0, 0)
1613                 } else {
1614                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1615                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1616                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1617                 };
1618                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1619                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1620                 for ref htlc in context.pending_outbound_htlcs.iter() {
1621                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1622                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1623                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1624                         }
1625                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1626                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1627                         }
1628                 }
1629
1630                 for update in context.holding_cell_htlc_updates.iter() {
1631                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1632                                 stats.pending_htlcs += 1;
1633                                 stats.pending_htlcs_value_msat += amount_msat;
1634                                 stats.holding_cell_msat += amount_msat;
1635                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1636                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1637                                 }
1638                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1639                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1640                                 } else {
1641                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1642                                 }
1643                         }
1644                 }
1645                 stats
1646         }
1647
	/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
	/// Doesn't bother handling the
	/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
	/// corner case properly.
	pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
	-> AvailableBalances
	where F::Target: FeeEstimator
	{
		let context = &self;
		// Note that we have to handle overflow due to the above case.
		let inbound_stats = context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = context.get_outbound_pending_htlc_stats(None);

		// Our balance: what we currently hold, plus inbound HTLCs we know the preimage for (and
		// will thus claim), minus everything we've offered outbound (including the holding cell).
		let mut balance_msat = context.value_to_self_msat;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
				balance_msat += htlc.amount_msat;
			}
		}
		balance_msat -= outbound_stats.pending_htlcs_value_msat;

		// Capacity we could send, before considering commitment-tx fees: our balance less pending
		// outbound HTLCs and the reserve the counterparty requires us to maintain.
		let outbound_capacity_msat = context.value_to_self_msat
				.saturating_sub(outbound_stats.pending_htlcs_value_msat)
				.saturating_sub(
					context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);

		let mut available_capacity_msat = outbound_capacity_msat;

		// On anchor channels the funder also pays for both anchor outputs.
		let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if context.is_outbound() {
			// We should mind channel commit tx fee when computing how much of the available capacity
			// can be used in the next htlc. Mirrors the logic in send_htlc.
			//
			// The fee depends on whether the amount we will be sending is above dust or not,
			// and the answer will in turn change the amount itself — making it a circular
			// dependency.
			// This complicates the computation around dust-values, up to the one-htlc-value.
			let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
			}

			// Fee reserve if the next HTLC is just above dust vs just below dust (the latter adds
			// no commitment output, so the commitment fee is one HTLC cheaper).
			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
			let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
			let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
			let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
				min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}

			// We will first subtract the fee as if we were above-dust. Then, if the resulting
			// value ends up being below dust, we have this fee available again. In that case,
			// match the value to right-below-dust.
			let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
				max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
			if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
				let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
				debug_assert!(one_htlc_difference_msat != 0);
				capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
				capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
				available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
			} else {
				available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
			}
		} else {
			// If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
			// sending a new HTLC won't reduce their balance below our reserve threshold.
			let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
			}

			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
			let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);

			let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
			let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
				.saturating_sub(inbound_stats.pending_htlcs_value_msat);

			if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
				// If another HTLC's fee would reduce the remote's balance below the reserve limit
				// we've selected for them, we can only send dust HTLCs.
				available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
			}
		}

		let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;

		// If we get close to our maximum dust exposure, we end up in a situation where we can send
		// between zero and the remaining dust exposure limit remaining OR above the dust limit.
		// Because we cannot express this as a simple min/max, we prefer to tell the user they can
		// send above the dust limit (as the router can always overpay to meet the dust limit).
		let mut remaining_msat_below_dust_exposure_limit = None;
		let mut dust_exposure_dust_limit_msat = 0;
		let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);

		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
		} else {
			let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
			(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			 context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// Would one more dust HTLC (worth just under the dust limit) push us over the exposure cap
		// on either commitment? If so, record how much dust room remains on that commitment.
		let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
			remaining_msat_below_dust_exposure_limit =
				Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
		}

		let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
			remaining_msat_below_dust_exposure_limit = Some(cmp::min(
				remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
				max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
		}

		if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
			if available_capacity_msat < dust_exposure_dust_limit_msat {
				available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
			} else {
				next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
			}
		}

		// NOTE(review): this subtraction assumes pending outbound value never exceeds the
		// counterparty's in-flight max; if holding-cell adds can push it over, a saturating_sub
		// would be safer — confirm the invariant.
		available_capacity_msat = cmp::min(available_capacity_msat,
			context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);

		// If we'd exceed the counterparty's max-accepted-HTLCs count, we can't add any HTLC at all.
		if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
			available_capacity_msat = 0;
		}

		AvailableBalances {
			// Inbound capacity: the counterparty's balance less their pending HTLCs and the
			// reserve we require them to maintain, floored at zero.
			inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
					- context.value_to_self_msat as i64
					- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
					- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
				0) as u64,
			outbound_capacity_msat,
			next_outbound_htlc_limit_msat: available_capacity_msat,
			next_outbound_htlc_minimum_msat,
			balance_msat,
		}
	}
1798
	/// Returns a tuple of (the channel reserve we selected, the channel reserve the counterparty
	/// selected if known), both in satoshis.
	pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
		let context = &self;
		(context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
	}
1803
	/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
	/// number of pending HTLCs that are on track to be in our next commitment tx.
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Callers must only use this on outbound (we-funded) channels.
		assert!(context.is_outbound());

		// On non-anchor channels the dust threshold is effectively raised by the fee on the
		// second-stage HTLC transaction (success tx for inbound HTLCs, timeout tx for outbound).
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;

		// Extra HTLCs on top of those already on track: the candidate (if non-dust under the
		// threshold matching its direction) plus an optional fee-spike-buffer HTLC.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			}
		}

		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
				continue
			}
			// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
			// transaction including this HTLC if it times out before they RAA.
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
				continue
			}
			match htlc.state {
				OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
				// transaction won't be generated until they send us their next RAA, which will mean
				// dropping any HTLCs in this state.
				_ => {},
			}
		}

		// Holding-cell adds will be freed into our next commitment, so non-dust ones count too.
		for htlc in context.holding_cell_htlc_updates.iter() {
			match htlc {
				&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
					if amount_msat / 1000 < real_dust_limit_timeout_sat {
						continue
					}
					included_htlcs += 1
				},
				_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
					 // ack we're guaranteed to never include them in commitment txs anymore.
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// Under test/fuzzing, record the details of this fee computation (excluding the spike
		// buffer, which never actually appears on a commitment) for later consistency checks.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
				+ context.holding_cell_htlc_updates.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1907
	/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
	/// pending HTLCs that are on track to be in their next commitment tx
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Callers must only use this on inbound (counterparty-funded) channels.
		assert!(!context.is_outbound());

		// On non-anchor channels the dust threshold is effectively raised by the fee on the
		// second-stage HTLC transaction. Note that on the *remote* commitment the roles flip:
		// our outbound HTLCs are their success-claims and our inbound are their timeout-claims,
		// and the counterparty's dust limit applies.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;

		// Extra HTLCs on top of those already on track: the candidate (if non-dust) plus an
		// optional fee-spike-buffer HTLC.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			}
		}

		// When calculating the set of HTLCs which will be included in their next commitment_signed, all
		// non-dust inbound HTLCs are included (as all states imply it will be included) and only
		// committed outbound HTLCs, see below.
		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			// NOTE(review): dust comparisons in this function use `<=` (exactly-at-limit counts
			// as dust) while `next_local_commit_tx_fee_msat` uses `<` — confirm the asymmetry
			// is intended.
			if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
				continue
			}
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
				continue
			}
			// We only include outbound HTLCs if it will not be included in their next commitment_signed,
			// i.e. if they've responded to us with an RAA after announcement.
			match htlc.state {
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
				_ => {},
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// Under test/fuzzing, record the details of this fee computation (excluding the spike
		// buffer, which never actually appears on a commitment) for later consistency checks.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1997
1998         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
1999                 where F: Fn() -> Option<O> {
2000                 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2001                    self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2002                         f()
2003                 } else {
2004                         None
2005                 }
2006         }
2007
	/// Returns the transaction if there is a pending funding transaction that is yet to be
	/// broadcast.
	///
	/// Returns `None` once the channel state no longer indicates pending (unbroadcast) funding,
	/// or if no funding transaction is stored.
	pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
		self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
	}
2013
2014         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2015         /// broadcast.
2016         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2017                 self.if_unbroadcasted_funding(||
2018                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2019                 )
2020         }
2021
2022         /// Returns whether the channel is funded in a batch.
2023         pub fn is_batch_funding(&self) -> bool {
2024                 self.is_batch_funding.is_some()
2025         }
2026
2027         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2028         /// broadcast.
2029         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2030                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2031         }
2032
	/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
	/// shutdown of this channel - no more calls into this Channel may be made afterwards except
	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
	/// immediately (others we will have to allow to time out).
	pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
		// Note that we MUST only generate a monitor update that indicates force-closure - we're
		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, its likely any monitor events we generate will
		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
		// return them to fail the payment.
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
		let counterparty_node_id = self.get_counterparty_node_id();
		for htlc_update in self.holding_cell_htlc_updates.drain(..) {
			match htlc_update {
				// Only `AddHTLC`s represent payments we can fail backwards; other holding-cell
				// updates (claims/fails) are simply dropped.
				HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
					dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
				},
				_ => {}
			}
		}
		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
			// returning a channel monitor update here would imply a channel monitor update before
			// we even registered the channel monitor to begin with, which is invalid.
			// Thus, if we aren't actually at a point where we could conceivably broadcast the
			// funding transaction, don't return a funding txo (which prevents providing the
			// monitor update to the user, even if we return one).
			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
				// Mark the monitor with the final (sentinel) update ID for a closed channel.
				self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
				Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
					update_id: self.latest_monitor_update_id,
					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
				}))
			} else { None }
		} else { None };
		// Capture the batch-funding txid *before* flipping state, as it is derived from
		// channel_state via `if_unbroadcasted_funding`.
		let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();

		self.channel_state = ChannelState::ShutdownComplete as u32;
		self.update_time_counter += 1;
		ShutdownResult {
			monitor_update,
			dropped_outbound_htlcs,
			unbroadcasted_batch_funding_txid,
		}
	}
2083 }
2084
2085 // Internal utility functions for channels
2086
2087 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2088 /// `channel_value_satoshis` in msat, set through
2089 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2090 ///
2091 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2092 ///
2093 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2094 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2095         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2096                 1
2097         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2098                 100
2099         } else {
2100                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2101         };
2102         channel_value_satoshis * 10 * configured_percent
2103 }
2104
2105 /// Returns a minimum channel reserve value the remote needs to maintain,
2106 /// required by us according to the configured or default
2107 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2108 ///
2109 /// Guaranteed to return a value no larger than channel_value_satoshis
2110 ///
2111 /// This is used both for outbound and inbound channels and has lower bound
2112 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2113 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2114         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2115         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2116 }
2117
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value, floored at 1000 sats, but never more than the whole
	// channel value. (Unsigned division by a non-zero constant cannot overflow.)
	let one_percent = channel_value_satoshis / 100;
	cmp::min(channel_value_satoshis, cmp::max(one_percent, 1000))
}
2126
2127 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2128 // Note that num_htlcs should not include dust HTLCs.
2129 #[inline]
2130 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2131         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2132 }
2133
2134 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2135 // Note that num_htlcs should not include dust HTLCs.
2136 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2137         // Note that we need to divide before multiplying to round properly,
2138         // since the lowest denomination of bitcoin on-chain is the satoshi.
2139         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2140 }
2141
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All channel state and configuration lives in the shared `ChannelContext`; the
	// methods on `Channel` below operate on it through `self.context`.
	pub context: ChannelContext<SP>,
}
2147
/// A snapshot of a commitment-transaction fee calculation together with the inputs it
/// was computed from (pending HTLC count, next HTLC ids on both sides, and feerate).
/// Only compiled for test/fuzzing builds; presumably used to cross-check later fee
/// computations against this cache — confirm at the use sites.
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Commitment tx fee, in satoshis.
	fee: u64,
	// Number of (non-dust) HTLCs included when `fee` was computed.
	total_pending_htlcs: usize,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	// Feerate (sat per 1000 weight) used for `fee`.
	feerate: u32,
}
2156
2157 impl<SP: Deref> Channel<SP> where
2158         SP::Target: SignerProvider,
2159         <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
2160 {
2161         fn check_remote_fee<F: Deref, L: Deref>(
2162                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2163                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2164         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2165         {
2166                 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2167                 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2168                 // We generally don't care too much if they set the feerate to something very high, but it
2169                 // could result in the channel being useless due to everything being dust. This doesn't
2170                 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2171                 // zero fee, so their fee is no longer considered to determine dust limits.
2172                 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
2173                         let upper_limit =
2174                                 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
2175                         if feerate_per_kw as u64 > upper_limit {
2176                                 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2177                         }
2178                 }
2179
2180                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2181                         ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2182                 } else {
2183                         ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2184                 };
2185                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2186                 if feerate_per_kw < lower_limit {
2187                         if let Some(cur_feerate) = cur_feerate_per_kw {
2188                                 if feerate_per_kw > cur_feerate {
2189                                         log_warn!(logger,
2190                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2191                                                 cur_feerate, feerate_per_kw);
2192                                         return Ok(());
2193                                 }
2194                         }
2195                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2196                 }
2197                 Ok(())
2198         }
2199
2200         #[inline]
2201         fn get_closing_scriptpubkey(&self) -> Script {
2202                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2203                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2204                 // outside of those situations will fail.
2205                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2206         }
2207
2208         #[inline]
2209         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2210                 let mut ret =
2211                 (4 +                                                   // version
2212                  1 +                                                   // input count
2213                  36 +                                                  // prevout
2214                  1 +                                                   // script length (0)
2215                  4 +                                                   // sequence
2216                  1 +                                                   // output count
2217                  4                                                     // lock time
2218                  )*4 +                                                 // * 4 for non-witness parts
2219                 2 +                                                    // witness marker and flag
2220                 1 +                                                    // witness element count
2221                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2222                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2223                 2*(1 + 71);                                            // two signatures + sighash type flags
2224                 if let Some(spk) = a_scriptpubkey {
2225                         ret += ((8+1) +                                    // output values and script length
2226                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2227                 }
2228                 if let Some(spk) = b_scriptpubkey {
2229                         ret += ((8+1) +                                    // output values and script length
2230                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2231                 }
2232                 ret
2233         }
2234
	/// Builds the cooperative closing transaction for the given proposed total fee,
	/// returning it along with the fee actually charged (which may exceed the proposal
	/// if the fee payer's balance could not cover it — see below).
	#[inline]
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		// A closing tx may only be built once all HTLCs are resolved and no fee update
		// is in flight.
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		// The funding (outbound) side pays the closing fee out of its balance.
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		// If the fee payer's balance went negative, fold the deficit into the reported
		// total fee. NOTE(review): the negative value itself is not reset to zero here;
		// the dust checks below cast a negative i64 to a huge u64 and thus won't zero it.
		// Presumably callers never propose a fee exceeding the funder's balance — confirm.
		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		// Drop outputs at or below our dust limit (or the remote output when the caller
		// asked to skip it entirely).
		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		// Both shutdown scripts must have been established before closing.
		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
2269
2270         fn funding_outpoint(&self) -> OutPoint {
2271                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2272         }
2273
2274         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2275         /// entirely.
2276         ///
2277         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2278         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2279         ///
2280         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2281         /// disconnected).
2282         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2283                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2284         where L::Target: Logger {
2285                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2286                 // (see equivalent if condition there).
2287                 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2288                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2289                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2290                 self.context.latest_monitor_update_id = mon_update_id;
2291                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2292                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2293                 }
2294         }
2295
	/// Marks an inbound HTLC as fulfilled with the given preimage and builds the
	/// [`ChannelMonitorUpdate`] carrying that preimage.
	///
	/// Returns [`UpdateFulfillFetch::NewClaim`] with an `update_fulfill_htlc` message when
	/// the claim can be sent to the peer immediately, or with `msg: None` when the claim
	/// was instead queued in the holding cell (awaiting remote RAA, peer disconnected, or
	/// monitor update in progress). Duplicate claims return
	/// [`UpdateFulfillFetch::DuplicateClaim`].
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted in an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// either.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		// Locate the inbound HTLC being claimed and sanity-check its state.
		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				// The preimage must hash to the HTLC's payment hash.
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						// Already locally removed: a repeated fulfill is a harmless duplicate,
						// but fulfilling an HTLC we previously failed indicates a bug upstream.
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			//
			// We can't send an update_fulfill_htlc right now, so stash the claim in the
			// holding cell — first checking it doesn't collide with an update already queued.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		// We can generate the update_fulfill message immediately; mark the HTLC as
		// locally removed with the fulfill preimage.
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
2417
	/// Claims an inbound HTLC via [`Self::get_update_fulfill_htlc`] and, when an
	/// `update_fulfill_htlc` message was produced, additionally builds the accompanying
	/// commitment update, merging both into a single [`ChannelMonitorUpdate`].
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		// If nothing is blocked, the resulting (commitment-signed) monitor update may be
		// released to the user immediately rather than queued behind blocked updates.
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					// The preimage update must fly before any blocked updates: give it the id
					// of the first blocked update (or keep its own if none) and shift every
					// blocked update's id up by one to keep ids strictly increasing.
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
2455
2456         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2457         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2458         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2459         /// before we fail backwards.
2460         ///
2461         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2462         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2463         /// [`ChannelError::Ignore`].
2464         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2465         -> Result<(), ChannelError> where L::Target: Logger {
2466                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2467                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2468         }
2469
2470         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2471         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2472         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2473         /// before we fail backwards.
2474         ///
2475         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2476         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2477         /// [`ChannelError::Ignore`].
2478         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2479         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2480                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2481                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2482                 }
2483                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2484
2485                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2486                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2487                 // these, but for now we just have to treat them as normal.
2488
2489                 let mut pending_idx = core::usize::MAX;
2490                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2491                         if htlc.htlc_id == htlc_id_arg {
2492                                 match htlc.state {
2493                                         InboundHTLCState::Committed => {},
2494                                         InboundHTLCState::LocalRemoved(ref reason) => {
2495                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2496                                                 } else {
2497                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2498                                                 }
2499                                                 return Ok(None);
2500                                         },
2501                                         _ => {
2502                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2503                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2504                                         }
2505                                 }
2506                                 pending_idx = idx;
2507                         }
2508                 }
2509                 if pending_idx == core::usize::MAX {
2510                         #[cfg(any(test, fuzzing))]
2511                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2512                         // is simply a duplicate fail, not previously failed and we failed-back too early.
2513                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2514                         return Ok(None);
2515                 }
2516
2517                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2518                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2519                         force_holding_cell = true;
2520                 }
2521
2522                 // Now update local state:
2523                 if force_holding_cell {
2524                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2525                                 match pending_update {
2526                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2527                                                 if htlc_id_arg == htlc_id {
2528                                                         #[cfg(any(test, fuzzing))]
2529                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2530                                                         return Ok(None);
2531                                                 }
2532                                         },
2533                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2534                                                 if htlc_id_arg == htlc_id {
2535                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2536                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2537                                                 }
2538                                         },
2539                                         _ => {}
2540                                 }
2541                         }
2542                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2543                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2544                                 htlc_id: htlc_id_arg,
2545                                 err_packet,
2546                         });
2547                         return Ok(None);
2548                 }
2549
2550                 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2551                 {
2552                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2553                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2554                 }
2555
2556                 Ok(Some(msgs::UpdateFailHTLC {
2557                         channel_id: self.context.channel_id(),
2558                         htlc_id: htlc_id_arg,
2559                         reason: err_packet
2560                 }))
2561         }
2562
2563         // Message handlers:
2564
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	///
	/// On success, returns the freshly-built `ChannelMonitor` which the caller must persist
	/// before broadcasting the funding transaction. Fails with `ChannelError::Close` if the
	/// message arrives on an inbound channel, in an unexpected state, or with an invalid
	/// commitment signature.
	pub fn funding_signed<L: Deref>(
		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
	where
		L::Target: Logger
	{
		// Only the funder (outbound side) ever receives funding_signed.
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
		}
		// We must be exactly in FundingCreated, ignoring a possibly-set MonitorUpdateInProgress bit.
		if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
		}
		// Both commitment numbers must still be at their initial values and no counterparty
		// secrets may have been seen yet — anything else indicates an internal logic error.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		// Rebuild the initial counterparty commitment transaction so it can be handed to the
		// new ChannelMonitor below.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		// Build our own initial commitment transaction and verify the counterparty's signature
		// over it before accepting the channel as funded.
		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
			}
		}

		// No HTLCs can exist at funding time, hence the empty HTLC-signature Vec.
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give our signer a chance to reject the commitment (e.g. an external signer's policy check).
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;


		// Construct the ChannelMonitor which will watch the chain for this channel from now on,
		// seeding it with our signed commitment and the counterparty's initial commitment.
		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		                                          shutdown_script, self.context.get_holder_selected_contest_delay(),
		                                          &self.context.destination_script, (funding_txo, funding_txo_script),
		                                          &self.context.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);

		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
		// If this channel is part of a batch funding, hold it in WaitingForBatch until every
		// channel in the batch has its monitor persisted (see `set_batch_ready`).
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
		} else {
			self.context.channel_state = ChannelState::FundingSent as u32;
		}
		// Advance both commitment numbers past the initial commitment (numbers count downward).
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let need_channel_ready = self.check_get_channel_ready(0).is_some();
		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok(channel_monitor)
	}
2657
2658         /// Updates the state of the channel to indicate that all channels in the batch have received
2659         /// funding_signed and persisted their monitors.
2660         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2661         /// treated as a non-batch channel going forward.
2662         pub fn set_batch_ready(&mut self) {
2663                 self.context.is_batch_funding = None;
2664                 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2665         }
2666
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	///
	/// Returns `Ok(None)` when no announcement_signatures is (yet) warranted, and an error when
	/// the message arrives while disconnected, at an unexpected state, or with a commitment
	/// point that doesn't match what we expect on a redundant retransmission.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// A channel_ready before channel_reestablish violates the protocol; stash it so it can be
		// replayed after reestablish (lnd-interop workaround) and ignore it for now.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Mask off shutdown-related flags so the state comparisons below see only funding state.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		debug_assert!(
			non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
			non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
		);
		if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
			// Theirs arrived first: record it and keep waiting for our side to be ready.
			self.context.channel_state |= ChannelState::TheirChannelReady as u32;
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We'd already sent ours — both sides are now ready, so the channel is fully open.
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
		} else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
			// If we reconnected before sending our `channel_ready` they may still resend theirs:
			(self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
			                      (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
		{
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			// Work out which per-commitment point a retransmission must carry, based on how far
			// the counterparty's commitment number has advanced.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
							&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
						).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			// Valid retransmission — nothing new to record, and no announcement_signatures now.
			return Ok(None);
		} else {
			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
		}

		// First-time receipt: rotate the counterparty's commitment points.
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
2744
	/// Handles an inbound update_add_htlc message, validating it against our channel limits and
	/// recording it as a pending inbound HTLC on success.
	///
	/// `pending_forward_status` is the status the HTLC would get if accepted; checks which only
	/// warrant failing (rather than closing) the HTLC rewrite it via `create_pending_htlc_status`.
	/// Errors returned here are `ChannelError::Close` — the peer has violated the protocol.
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		// We can't accept HTLCs sent after we've sent a shutdown.
		let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if local_sent_shutdown {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if remote_sent_shutdown {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		// Basic amount sanity checks: non-zero, no more than the whole channel, above our minimum.
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		// Enforce our advertised per-direction HTLC count and in-flight value caps.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		// Compute the per-direction dust thresholds; with anchors-zero-fee-htlc-tx no HTLC tx
		// fee is added, so only the raw dust limits apply.
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		// Dust exposure on the counterparty's commitment tx: fail (not close) the HTLC if it
		// would push us over our configured limit.
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// Same dust-exposure check, on our own commitment tx.
		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// The remote's remaining balance (including value currently locked in inbound HTLCs,
		// minus outbound successes about to be removed) must cover this HTLC.
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}

		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so we need to account for them.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}
		// HTLC IDs must be issued strictly sequentially.
		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		// cltv_expiry is a block height; values >= 500,000,000 are the timestamp range.
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
2914
2915         /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
2916         #[inline]
2917         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
2918                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
2919                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
2920                         if htlc.htlc_id == htlc_id {
2921                                 let outcome = match check_preimage {
2922                                         None => fail_reason.into(),
2923                                         Some(payment_preimage) => {
2924                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
2925                                                 if payment_hash != htlc.payment_hash {
2926                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
2927                                                 }
2928                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
2929                                         }
2930                                 };
2931                                 match htlc.state {
2932                                         OutboundHTLCState::LocalAnnounced(_) =>
2933                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
2934                                         OutboundHTLCState::Committed => {
2935                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
2936                                         },
2937                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
2938                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
2939                                 }
2940                                 return Ok(htlc);
2941                         }
2942                 }
2943                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
2944         }
2945
2946         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2947                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2948                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2949                 }
2950                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2951                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
2952                 }
2953
2954                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
2955         }
2956
2957         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2958                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2959                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
2960                 }
2961                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2962                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
2963                 }
2964
2965                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2966                 Ok(())
2967         }
2968
2969         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2970                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2971                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
2972                 }
2973                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2974                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
2975                 }
2976
2977                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2978                 Ok(())
2979         }
2980
	/// Handles a peer's `commitment_signed` message.
	///
	/// Verifies the counterparty's signature on our next holder commitment transaction (and
	/// on each non-dust HTLC transaction), then — only after all fallible checks pass —
	/// advances pending HTLC/fee state and queues a [`ChannelMonitorUpdate`] recording the
	/// new commitment.
	///
	/// Returns the monitor update to hand to the chain monitor (or `None` if the update was
	/// queued as blocked internally), or a `ChannelError::Close` if the message is invalid.
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		// Reject the message unless the channel is operational, the peer has re-established
		// after any disconnect, and we haven't already moved on to closing-fee negotiation.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		// Rebuild the holder commitment transaction the peer should have signed, and verify
		// their signature over its sighash against their funding pubkey.
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			// Only the channel funder may send update_fee, so a remote-announced fee update
			// implies we are not the funder.
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		// Test-only cross-check: the fee we just computed should match the fee projected when
		// the commitment was last speculatively costed, if the HTLC set/feerate is unchanged.
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
						}
				}
			}
		}

		// The peer must sign exactly one HTLC transaction per non-dust HTLC output.
		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		// Verify the peer's signature over each non-dust HTLC transaction, collecting the
		// (HTLC, signature, source) tuples (or, in the test-only alternate encoding, the
		// sources separately) for the monitor update.
		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			// A present `transaction_output_index` marks the HTLC as non-dust (it has an
			// output in the commitment transaction and thus an HTLC transaction to sign).
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				// Anchor channels sign HTLC txs SINGLE|ANYONECANPAY so fees can be attached later.
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give the signer a chance to reject the commitment before we commit to it.
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		// A remote-announced fee update is now committed on our side; it awaits our RAA
		// before the peer may announce it as final.
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		// Advance inbound HTLCs the peer announced: they are now included in a signed
		// commitment and await our revoke_and_ack.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		// Advance outbound HTLCs the peer removed, recording any claimed preimages so the
		// monitor learns about them.
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim anyway.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}]
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that get_last_commitment_update
				// includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
3201
3202         /// Public version of the below, checking relevant preconditions first.
3203         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3204         /// returns `(None, Vec::new())`.
3205         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3206                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3207         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3208         where F::Target: FeeEstimator, L::Target: Logger
3209         {
3210                 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3211                    (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3212                         self.free_holding_cell_htlcs(fee_estimator, logger)
3213                 } else { (None, Vec::new()) }
3214         }
3215
3216         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3217         /// for our counterparty.
3218         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3219                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3220         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3221         where F::Target: FeeEstimator, L::Target: Logger
3222         {
3223                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3224                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3225                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3226                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3227
3228                         let mut monitor_update = ChannelMonitorUpdate {
3229                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3230                                 updates: Vec::new(),
3231                         };
3232
3233                         let mut htlc_updates = Vec::new();
3234                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3235                         let mut update_add_count = 0;
3236                         let mut update_fulfill_count = 0;
3237                         let mut update_fail_count = 0;
3238                         let mut htlcs_to_fail = Vec::new();
3239                         for htlc_update in htlc_updates.drain(..) {
3240                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
3241                                 // fee races with adding too many outputs which push our total payments just over
3242                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
3243                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3244                                 // to rebalance channels.
3245                                 match &htlc_update {
3246                                         &HTLCUpdateAwaitingACK::AddHTLC {
3247                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3248                                                 skimmed_fee_msat, ..
3249                                         } => {
3250                                                 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3251                                                         onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3252                                                 {
3253                                                         Ok(_) => update_add_count += 1,
3254                                                         Err(e) => {
3255                                                                 match e {
3256                                                                         ChannelError::Ignore(ref msg) => {
3257                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3258                                                                                 // If we fail to send here, then this HTLC should
3259                                                                                 // be failed backwards. Failing to send here
3260                                                                                 // indicates that this HTLC may keep being put back
3261                                                                                 // into the holding cell without ever being
3262                                                                                 // successfully forwarded/failed/fulfilled, causing
3263                                                                                 // our counterparty to eventually close on us.
3264                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3265                                                                         },
3266                                                                         _ => {
3267                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3268                                                                         },
3269                                                                 }
3270                                                         }
3271                                                 }
3272                                         },
3273                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3274                                                 // If an HTLC claim was previously added to the holding cell (via
3275                                                 // `get_update_fulfill_htlc`, then generating the claim message itself must
3276                                                 // not fail - any in between attempts to claim the HTLC will have resulted
3277                                                 // in it hitting the holding cell again and we cannot change the state of a
3278                                                 // holding cell HTLC from fulfill to anything else.
3279                                                 let mut additional_monitor_update =
3280                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3281                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3282                                                         { monitor_update } else { unreachable!() };
3283                                                 update_fulfill_count += 1;
3284                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3285                                         },
3286                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3287                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3288                                                         Ok(update_fail_msg_option) => {
3289                                                                 // If an HTLC failure was previously added to the holding cell (via
3290                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3291                                                                 // not fail - we should never end up in a state where we double-fail
3292                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3293                                                                 // for a full revocation before failing.
3294                                                                 debug_assert!(update_fail_msg_option.is_some());
3295                                                                 update_fail_count += 1;
3296                                                         },
3297                                                         Err(e) => {
3298                                                                 if let ChannelError::Ignore(_) = e {}
3299                                                                 else {
3300                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3301                                                                 }
3302                                                         }
3303                                                 }
3304                                         },
3305                                 }
3306                         }
3307                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3308                                 return (None, htlcs_to_fail);
3309                         }
3310                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3311                                 self.send_update_fee(feerate, false, fee_estimator, logger)
3312                         } else {
3313                                 None
3314                         };
3315
3316                         let mut additional_update = self.build_commitment_no_status_check(logger);
3317                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3318                         // but we want them to be strictly increasing by one, so reset it here.
3319                         self.context.latest_monitor_update_id = monitor_update.update_id;
3320                         monitor_update.updates.append(&mut additional_update.updates);
3321
3322                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3323                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3324                                 update_add_count, update_fulfill_count, update_fail_count);
3325
3326                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3327                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3328                 } else {
3329                         (None, Vec::new())
3330                 }
3331         }
3332
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	///
	/// On success, returns the HTLCs which should be failed backwards (those which we could not
	/// free from our holding cell) and, unless the resulting [`ChannelMonitorUpdate`] must be
	/// held back (because `hold_mon_update` is set or other monitor updates are already blocked),
	/// the monitor update itself. When held back, the update is queued in
	/// `blocked_monitor_updates` and `None` is returned in its place.
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		// revoke_and_ack is only acceptable on an operational channel with a connected peer,
		// and not once we have started exchanging closing_signed messages.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		// The revealed secret must derive the commitment point of the state being revoked.
		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked claim). But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		#[cfg(any(test, fuzzing))]
		{
			// (test/fuzzing only) The cached next-commitment fee info was computed against the
			// pre-revocation state and is now stale, so drop it.
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			}
		};

		// Store the new secret (provide_secret also checks it is consistent with the secrets the
		// peer previously revealed) and queue it in a new monitor update.
		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.
		self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.context.sent_message_awaiting_response = None;
		// Advance the counterparty's commitment point/number to the next state.
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		// Set if any HTLC/fee state transition below requires us to send a new commitment_signed.
		let mut require_commitment = false;
		// Net change (in msat) to our balance from HTLC removals resolved by this RAA.
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;

			// We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					// Temporarily swap Committed in so we can move the owned state out and match
					// on it by value below.
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		// Either hand `monitor_update` back to the caller or, when updates are blocked (or the
		// caller asked us to hold it), queue it for release later.
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call get_last_commitment_update(),
				// which does not update state, but we're definitely now awaiting a remote revoke
				// before we can step forward any more, so set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so reset it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so reset it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so reset it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
3600
3601         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3602         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3603         /// commitment update.
3604         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3605                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3606         where F::Target: FeeEstimator, L::Target: Logger
3607         {
3608                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3609                 assert!(msg_opt.is_none(), "We forced holding cell?");
3610         }
3611
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on the optionness of the return value.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	///
	/// Returns `None` (sending no message) when the update is cancelled (unaffordable fee or
	/// excessive dust exposure) or when it is stashed in the holding cell instead — either
	/// because `force_holding_cell` was set or because we are awaiting a remote revoke / a
	/// monitor update is in progress.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		// Only the outbound side may send update_fee, and only on a usable, live channel; the
		// caller (ChannelManager) is responsible for never getting here otherwise.
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		// Both HTLC stats are evaluated at the *proposed* feerate, not the current one.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		// Budget the new fee for a commitment transaction which additionally carries all
		// holding-cell HTLCs plus a CONCURRENT_INBOUND_HTLC_FEE_BUFFER-sized buffer of HTLCs.
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		// On top of the buffered fee we must also keep the counterparty-selected reserve intact.
		if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		// Cancel the update if the new feerate would push our dust exposure past the configured
		// cap on either side's commitment transaction.
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		// While awaiting a remote revoke or with a monitor update in progress we can't send new
		// updates, so divert the fee update into the holding cell.
		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		// There must not already be an un-revoked update_fee in flight.
		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}
3678
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		// If we haven't even made it through the funding flow there is nothing worth
		// reestablishing - tell the caller to force-shutdown instead.
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			return Err(());
		}

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		// Any announcement_signatures we sent may never have arrived - arrange for them to be
		// re-sent after reconnection.
		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		// The dropped HTLCs' IDs will be re-used when the counterparty re-sends them, so wind our
		// expected next counterparty HTLC ID back by the number dropped.
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		// Like a RemoteAnnounced HTLC, an update_fee they announced but never committed is simply
		// forgotten; they can re-send it after reconnection if they still want the new feerate.
		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		// Tracking a response timeout from the peer is moot now that they have disconnected.
		self.context.sent_message_awaiting_response = None;

		self.context.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
3757
3758         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3759         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3760         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3761         /// update completes (potentially immediately).
3762         /// The messages which were generated with the monitor update must *not* have been sent to the
3763         /// remote end, and must instead have been dropped. They will be regenerated when
3764         /// [`Self::monitor_updating_restored`] is called.
3765         ///
3766         /// [`ChannelManager`]: super::channelmanager::ChannelManager
3767         /// [`chain::Watch`]: crate::chain::Watch
3768         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3769         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3770                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3771                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3772                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3773         ) {
3774                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3775                 self.context.monitor_pending_commitment_signed |= resend_commitment;
3776                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3777                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3778                 self.context.monitor_pending_failures.append(&mut pending_fails);
3779                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3780                 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3781         }
3782
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		// We must have previously been paused via `monitor_updating_paused`.
		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		// Take ownership of the queued HTLC resolutions so they can be returned to the
		// ChannelManager for processing.
		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		// If the peer disconnected while the update was in flight we can't send them the RAA or
		// commitment update now - they'll be regenerated as part of the channel_reestablish dance.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.mark_awaiting_response();
			Some(self.get_last_commitment_update(logger))
		} else { None };

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		// The order in which we re-send the RAA and commitment update matters - use the order
		// recorded when the updates were originally generated.
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
3865
	/// Handles an inbound `update_fee` message from the counterparty (who must be the channel
	/// funder). The new feerate is staged in `pending_update_fee` as `RemoteAnnounced` until the
	/// corresponding commitment_signed/revoke_and_ack exchange completes.
	///
	/// Returns a `ChannelError::Close` if we are the funder, if the peer should have sent a
	/// channel_reestablish first, if the proposed feerate is unacceptable, or if the feerate
	/// increase would push our dust exposure over the configured limit.
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// Only the funding (outbound) side may send update_fee, so we must be inbound here.
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
		// This comparison must happen *before* we stage the new fee below, as
		// `get_dust_buffer_feerate` takes any pending fee update into account.
		let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// If the feerate has increased over the previous dust buffer (note that
		// `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
		// won't be pushed over our dust exposure limit by the feerate increase.
		if feerate_over_dust_buffer {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
3900
3901         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
3902                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3903                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
3904                 msgs::RevokeAndACK {
3905                         channel_id: self.context.channel_id,
3906                         per_commitment_secret,
3907                         next_per_commitment_point,
3908                         #[cfg(taproot)]
3909                         next_local_nonce: None,
3910                 }
3911         }
3912
3913         fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
3914                 let mut update_add_htlcs = Vec::new();
3915                 let mut update_fulfill_htlcs = Vec::new();
3916                 let mut update_fail_htlcs = Vec::new();
3917                 let mut update_fail_malformed_htlcs = Vec::new();
3918
3919                 for htlc in self.context.pending_outbound_htlcs.iter() {
3920                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
3921                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
3922                                         channel_id: self.context.channel_id(),
3923                                         htlc_id: htlc.htlc_id,
3924                                         amount_msat: htlc.amount_msat,
3925                                         payment_hash: htlc.payment_hash,
3926                                         cltv_expiry: htlc.cltv_expiry,
3927                                         onion_routing_packet: (**onion_packet).clone(),
3928                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
3929                                 });
3930                         }
3931                 }
3932
3933                 for htlc in self.context.pending_inbound_htlcs.iter() {
3934                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3935                                 match reason {
3936                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
3937                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
3938                                                         channel_id: self.context.channel_id(),
3939                                                         htlc_id: htlc.htlc_id,
3940                                                         reason: err_packet.clone()
3941                                                 });
3942                                         },
3943                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
3944                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
3945                                                         channel_id: self.context.channel_id(),
3946                                                         htlc_id: htlc.htlc_id,
3947                                                         sha256_of_onion: sha256_of_onion.clone(),
3948                                                         failure_code: failure_code.clone(),
3949                                                 });
3950                                         },
3951                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
3952                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
3953                                                         channel_id: self.context.channel_id(),
3954                                                         htlc_id: htlc.htlc_id,
3955                                                         payment_preimage: payment_preimage.clone(),
3956                                                 });
3957                                         },
3958                                 }
3959                         }
3960                 }
3961
3962                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
3963                         Some(msgs::UpdateFee {
3964                                 channel_id: self.context.channel_id(),
3965                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
3966                         })
3967                 } else { None };
3968
3969                 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
3970                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
3971                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
3972                 msgs::CommitmentUpdate {
3973                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
3974                         commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
3975                 }
3976         }
3977
3978         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
3979         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
3980                 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
3981                         assert!(self.context.shutdown_scriptpubkey.is_some());
3982                         Some(msgs::Shutdown {
3983                                 channel_id: self.context.channel_id,
3984                                 scriptpubkey: self.get_closing_scriptpubkey(),
3985                         })
3986                 } else { None }
3987         }
3988
3989         /// May panic if some calls other than message-handling calls (which will all Err immediately)
3990         /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
3991         ///
3992         /// Some links printed in log lines are included here to check them during build (when run with
3993         /// `cargo doc --document-private-items`):
3994         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
3995         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
3996         pub fn channel_reestablish<L: Deref, NS: Deref>(
3997                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
3998                 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
3999         ) -> Result<ReestablishResponses, ChannelError>
4000         where
4001                 L::Target: Logger,
4002                 NS::Target: NodeSigner
4003         {
4004                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4005                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4006                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
4007                         // just close here instead of trying to recover.
4008                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4009                 }
4010
4011                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4012                         msg.next_local_commitment_number == 0 {
4013                         return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4014                 }
4015
4016                 if msg.next_remote_commitment_number > 0 {
4017                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4018                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4019                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4020                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4021                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4022                         }
4023                         if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4024                                 macro_rules! log_and_panic {
4025                                         ($err_msg: expr) => {
4026                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4027                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4028                                         }
4029                                 }
4030                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4031                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4032                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4033                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4034                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4035                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4036                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4037                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4038                         }
4039                 }
4040
4041                 // Before we change the state of the channel, we check if the peer is sending a very old
4042                 // commitment transaction number, if yes we send a warning message.
4043                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4044                 if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4045                         return Err(
4046                                 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4047                         );
4048                 }
4049
4050                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4051                 // remaining cases either succeed or ErrorMessage-fail).
4052                 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4053                 self.context.sent_message_awaiting_response = None;
4054
4055                 let shutdown_msg = self.get_outbound_shutdown();
4056
4057                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4058
4059                 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4060                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4061                         if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4062                                         self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4063                                 if msg.next_remote_commitment_number != 0 {
4064                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4065                                 }
4066                                 // Short circuit the whole handler as there is nothing we can resend them
4067                                 return Ok(ReestablishResponses {
4068                                         channel_ready: None,
4069                                         raa: None, commitment_update: None,
4070                                         order: RAACommitmentOrder::CommitmentFirst,
4071                                         shutdown_msg, announcement_sigs,
4072                                 });
4073                         }
4074
4075                         // We have OurChannelReady set!
4076                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4077                         return Ok(ReestablishResponses {
4078                                 channel_ready: Some(msgs::ChannelReady {
4079                                         channel_id: self.context.channel_id(),
4080                                         next_per_commitment_point,
4081                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4082                                 }),
4083                                 raa: None, commitment_update: None,
4084                                 order: RAACommitmentOrder::CommitmentFirst,
4085                                 shutdown_msg, announcement_sigs,
4086                         });
4087                 }
4088
4089                 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4090                         // Remote isn't waiting on any RevokeAndACK from us!
4091                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4092                         None
4093                 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4094                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4095                                 self.context.monitor_pending_revoke_and_ack = true;
4096                                 None
4097                         } else {
4098                                 Some(self.get_last_revoke_and_ack())
4099                         }
4100                 } else {
4101                         return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4102                 };
4103
4104                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4105                 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4106                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4107                 // the corresponding revoke_and_ack back yet.
4108                 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4109                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4110                         self.mark_awaiting_response();
4111                 }
4112                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4113
4114                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4115                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4116                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4117                         Some(msgs::ChannelReady {
4118                                 channel_id: self.context.channel_id(),
4119                                 next_per_commitment_point,
4120                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4121                         })
4122                 } else { None };
4123
4124                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4125                         if required_revoke.is_some() {
4126                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4127                         } else {
4128                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4129                         }
4130
4131                         Ok(ReestablishResponses {
4132                                 channel_ready, shutdown_msg, announcement_sigs,
4133                                 raa: required_revoke,
4134                                 commitment_update: None,
4135                                 order: self.context.resend_order.clone(),
4136                         })
4137                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4138                         if required_revoke.is_some() {
4139                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4140                         } else {
4141                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4142                         }
4143
4144                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4145                                 self.context.monitor_pending_commitment_signed = true;
4146                                 Ok(ReestablishResponses {
4147                                         channel_ready, shutdown_msg, announcement_sigs,
4148                                         commitment_update: None, raa: None,
4149                                         order: self.context.resend_order.clone(),
4150                                 })
4151                         } else {
4152                                 Ok(ReestablishResponses {
4153                                         channel_ready, shutdown_msg, announcement_sigs,
4154                                         raa: required_revoke,
4155                                         commitment_update: Some(self.get_last_commitment_update(logger)),
4156                                         order: self.context.resend_order.clone(),
4157                                 })
4158                         }
4159                 } else {
4160                         Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4161                 }
4162         }
4163
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		// Cached after the first computation so that every proposal within one negotiation uses a
		// consistent range (the cache is only dropped on reconnect/restart).
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		// If we're not the funder (not outbound) we don't pay the closing fee, so we place no
		// feerate upper bound of our own here (the absolute cap is applied below instead).
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			// Raise both ends of the range to at least the user-requested target feerate (for the
			// non-funder, never above the current commitment feerate).
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one sides' balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funders' output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				// As the non-funder, cap the absolute fee at the counterparty's balance (channel
				// value minus our balance, rounding our msat balance up to a whole satoshi) -
				// they cannot pay a fee with funds they don't have.
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
4216
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		// Thin delegate - the actual channel-state and HTLC checks live on the context.
		self.context.closing_negotiation_ready()
	}
4224
4225         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4226         /// an Err if no progress is being made and the channel should be force-closed instead.
4227         /// Should be called on a one-minute timer.
4228         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4229                 if self.closing_negotiation_ready() {
4230                         if self.context.closing_signed_in_flight {
4231                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4232                         } else {
4233                                 self.context.closing_signed_in_flight = true;
4234                         }
4235                 }
4236                 Ok(())
4237         }
4238
	/// If negotiation is ready and we are the funder, builds, signs, and returns our initial
	/// `closing_signed` proposal at the minimum fee of our acceptable range.
	///
	/// Returns `(None, None, None)` when there is nothing to do: we already sent a proposal,
	/// negotiation isn't ready, or (as the non-funder) the counterparty hasn't proposed yet.
	/// For the non-funder, a counterparty `closing_signed` deferred during a monitor update is
	/// (re-)processed here instead.
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

		if !self.context.is_outbound() {
			// The funder proposes first. As the non-funder, just handle any counterparty
			// closing_signed that was stashed while a monitor update was in progress.
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		// Open negotiation at the bottom of our range.
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				// Record what we proposed so we can later detect counterparty agreement (and
				// avoid proposing twice - see the early-return above).
				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			}
		}
	}
4281
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// a reconnection.
	fn mark_awaiting_response(&mut self) {
		// Start the tick counter at zero; it is advanced by
		// `should_disconnect_peer_awaiting_response` on each timer tick.
		self.context.sent_message_awaiting_response = Some(0);
	}
4288
4289         /// Determines whether we should disconnect the counterparty due to not receiving a response
4290         /// within our expected timeframe.
4291         ///
4292         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4293         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4294                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4295                         ticks_elapsed
4296                 } else {
4297                         // Don't disconnect when we're not waiting on a response.
4298                         return false;
4299                 };
4300                 *ticks_elapsed += 1;
4301                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4302         }
4303
	/// Handles a counterparty `shutdown` message: validates the channel state and the provided
	/// closing scriptpubkey, records (or re-verifies) the counterparty's closing script, and, if
	/// we haven't yet sent our own `shutdown`, prepares one along with a monitor update
	/// persisting our closing script.
	///
	/// Returns our `shutdown` message (if one should be sent), an optional
	/// [`ChannelMonitorUpdate`] recording our shutdown script, and the holding-cell HTLC adds we
	/// dropped so the caller can fail the corresponding payments back.
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		// Reject a shutdown racing an inbound HTLC the peer announced but which we haven't yet
		// seen committed.
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
		}

		// A counterparty may re-send shutdown (e.g. on reconnect), but may never change the
		// closing script they previously committed to.
		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;

		// Lazily pick our own closing script via the signer if we don't have one yet; `true`
		// here means the monitor must be updated with it below.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
		self.context.update_time_counter += 1;

		// Persist our freshly-selected closing script to the channel monitor so it is available
		// after a restart.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
4400
4401         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4402                 let mut tx = closing_tx.trust().built_transaction().clone();
4403
4404                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4405
4406                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4407                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4408                 let mut holder_sig = sig.serialize_der().to_vec();
4409                 holder_sig.push(EcdsaSighashType::All as u8);
4410                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4411                 cp_sig.push(EcdsaSighashType::All as u8);
4412                 if funding_key[..] < counterparty_funding_key[..] {
4413                         tx.input[0].witness.push(holder_sig);
4414                         tx.input[0].witness.push(cp_sig);
4415                 } else {
4416                         tx.input[0].witness.push(cp_sig);
4417                         tx.input[0].witness.push(holder_sig);
4418                 }
4419
4420                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4421                 tx
4422         }
4423
4424         pub fn closing_signed<F: Deref>(
4425                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4426                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4427                 where F::Target: FeeEstimator
4428         {
4429                 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4430                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4431                 }
4432                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4433                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4434                 }
4435                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4436                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4437                 }
4438                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4439                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4440                 }
4441
4442                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4443                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4444                 }
4445
4446                 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4447                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
4448                         return Ok((None, None, None));
4449                 }
4450
4451                 let funding_redeemscript = self.context.get_funding_redeemscript();
4452                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4453                 if used_total_fee != msg.fee_satoshis {
4454                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4455                 }
4456                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4457
4458                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4459                         Ok(_) => {},
4460                         Err(_e) => {
4461                                 // The remote end may have decided to revoke their output due to inconsistent dust
4462                                 // limits, so check for that case by re-checking the signature here.
4463                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4464                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4465                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4466                         },
4467                 };
4468
4469                 for outp in closing_tx.trust().built_transaction().output.iter() {
4470                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4471                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4472                         }
4473                 }
4474
4475                 assert!(self.context.shutdown_scriptpubkey.is_some());
4476                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4477                         if last_fee == msg.fee_satoshis {
4478                                 let shutdown_result = ShutdownResult {
4479                                         monitor_update: None,
4480                                         dropped_outbound_htlcs: Vec::new(),
4481                                         unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4482                                 };
4483                                 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4484                                 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4485                                 self.context.update_time_counter += 1;
4486                                 return Ok((None, Some(tx), Some(shutdown_result)));
4487                         }
4488                 }
4489
4490                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4491
4492                 macro_rules! propose_fee {
4493                         ($new_fee: expr) => {
4494                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4495                                         (closing_tx, $new_fee)
4496                                 } else {
4497                                         self.build_closing_transaction($new_fee, false)
4498                                 };
4499
4500                                 return match &self.context.holder_signer {
4501                                         ChannelSignerType::Ecdsa(ecdsa) => {
4502                                                 let sig = ecdsa
4503                                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4504                                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4505                                                 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4506                                                         let shutdown_result = ShutdownResult {
4507                                                                 monitor_update: None,
4508                                                                 dropped_outbound_htlcs: Vec::new(),
4509                                                                 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4510                                                         };
4511                                                         self.context.channel_state = ChannelState::ShutdownComplete as u32;
4512                                                         self.context.update_time_counter += 1;
4513                                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4514                                                         (Some(tx), Some(shutdown_result))
4515                                                 } else {
4516                                                         (None, None)
4517                                                 };
4518
4519                                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4520                                                 Ok((Some(msgs::ClosingSigned {
4521                                                         channel_id: self.context.channel_id,
4522                                                         fee_satoshis: used_fee,
4523                                                         signature: sig,
4524                                                         fee_range: Some(msgs::ClosingSignedFeeRange {
4525                                                                 min_fee_satoshis: our_min_fee,
4526                                                                 max_fee_satoshis: our_max_fee,
4527                                                         }),
4528                                                 }), signed_tx, shutdown_result))
4529                                         }
4530                                 }
4531                         }
4532                 }
4533
4534                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4535                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4536                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4537                         }
4538                         if max_fee_satoshis < our_min_fee {
4539                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4540                         }
4541                         if min_fee_satoshis > our_max_fee {
4542                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4543                         }
4544
4545                         if !self.context.is_outbound() {
4546                                 // They have to pay, so pick the highest fee in the overlapping range.
4547                                 // We should never set an upper bound aside from their full balance
4548                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4549                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4550                         } else {
4551                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4552                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4553                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
4554                                 }
4555                                 // The proposed fee is in our acceptable range, accept it and broadcast!
4556                                 propose_fee!(msg.fee_satoshis);
4557                         }
4558                 } else {
4559                         // Old fee style negotiation. We don't bother to enforce whether they are complying
4560                         // with the "making progress" requirements, we just comply and hope for the best.
4561                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4562                                 if msg.fee_satoshis > last_fee {
4563                                         if msg.fee_satoshis < our_max_fee {
4564                                                 propose_fee!(msg.fee_satoshis);
4565                                         } else if last_fee < our_max_fee {
4566                                                 propose_fee!(our_max_fee);
4567                                         } else {
4568                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4569                                         }
4570                                 } else {
4571                                         if msg.fee_satoshis > our_min_fee {
4572                                                 propose_fee!(msg.fee_satoshis);
4573                                         } else if last_fee > our_min_fee {
4574                                                 propose_fee!(our_min_fee);
4575                                         } else {
4576                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4577                                         }
4578                                 }
4579                         } else {
4580                                 if msg.fee_satoshis < our_min_fee {
4581                                         propose_fee!(our_min_fee);
4582                                 } else if msg.fee_satoshis > our_max_fee {
4583                                         propose_fee!(our_max_fee);
4584                                 } else {
4585                                         propose_fee!(msg.fee_satoshis);
4586                                 }
4587                         }
4588                 }
4589         }
4590
4591         fn internal_htlc_satisfies_config(
4592                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4593         ) -> Result<(), (&'static str, u16)> {
4594                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4595                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4596                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4597                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4598                         return Err((
4599                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4600                                 0x1000 | 12, // fee_insufficient
4601                         ));
4602                 }
4603                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4604                         return Err((
4605                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4606                                 0x1000 | 13, // incorrect_cltv_expiry
4607                         ));
4608                 }
4609                 Ok(())
4610         }
4611
4612         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4613         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4614         /// unsuccessful, falls back to the previous one if one exists.
4615         pub fn htlc_satisfies_config(
4616                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4617         ) -> Result<(), (&'static str, u16)> {
4618                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4619                         .or_else(|err| {
4620                                 if let Some(prev_config) = self.context.prev_config() {
4621                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4622                                 } else {
4623                                         Err(err)
4624                                 }
4625                         })
4626         }
4627
	/// Returns the holder commitment transaction number one past the context's current value.
	// NOTE(review): commitment numbers appear to count *down* as new commitments are exchanged
	// (see the INITIAL_COMMITMENT_NUMBER - 1 checks elsewhere in this file), so "+ 1" steps back
	// to the previously-current number - confirm against the context field's definition.
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}
4631
	/// Returns the counterparty commitment transaction number one past the context's current
	/// value, backing out one step if we are currently awaiting the counterparty's
	/// revoke_and_ack (i.e. the AwaitingRemoteRevoke state flag is set).
	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
	}
4635
	/// Returns the counterparty commitment transaction number two past the context's current
	/// value, i.e. one step further back than
	/// [`Self::get_cur_counterparty_commitment_transaction_number`]'s base value.
	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
4639
	/// (Test-only) Exposes a reference to the channel's holder signer so tests can inspect
	/// signing state directly.
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
		&self.context.holder_signer
	}
4644
4645         #[cfg(test)]
4646         pub fn get_value_stat(&self) -> ChannelValueStat {
4647                 ChannelValueStat {
4648                         value_to_self_msat: self.context.value_to_self_msat,
4649                         channel_value_msat: self.context.channel_value_satoshis * 1000,
4650                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4651                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4652                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4653                         holding_cell_outbound_amount_msat: {
4654                                 let mut res = 0;
4655                                 for h in self.context.holding_cell_htlc_updates.iter() {
4656                                         match h {
4657                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4658                                                         res += amount_msat;
4659                                                 }
4660                                                 _ => {}
4661                                         }
4662                                 }
4663                                 res
4664                         },
4665                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4666                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4667                 }
4668         }
4669
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		// MonitorUpdateInProgress is a flag bit that may be set alongside other state bits.
		(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
	}
4675
4676         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4677         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4678                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4679                 self.context.blocked_monitor_updates[0].update.update_id - 1
4680         }
4681
4682         /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4683         /// further blocked monitor update exists after the next.
4684         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4685                 if self.context.blocked_monitor_updates.is_empty() { return None; }
4686                 Some((self.context.blocked_monitor_updates.remove(0).update,
4687                         !self.context.blocked_monitor_updates.is_empty()))
4688         }
4689
4690         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4691         /// immediately given to the user for persisting or `None` if it should be held as blocked.
4692         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4693         -> Option<ChannelMonitorUpdate> {
4694                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4695                 if !release_monitor {
4696                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4697                                 update,
4698                         });
4699                         None
4700                 } else {
4701                         Some(update)
4702                 }
4703         }
4704
	/// Returns the number of [`ChannelMonitorUpdate`]s currently queued as blocked.
	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
4708
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		// Mask out the flag bits which may legitimately be set alongside FundingSent and check
		// whether we're otherwise exactly in the FundingSent state.
		if self.context.channel_state &
			!(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
				== ChannelState::FundingSent as u32 {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// FundingSent set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
4744
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		// Either the OurChannelReady flag is set, or (ignoring flag bits) we have advanced to
		// or past the ChannelReady state.
		(self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
	}
4749
	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		// Set when we've received a shutdown message from the counterparty.
		(self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
	}
4754
	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		// Set when we've sent a shutdown message to the counterparty.
		(self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
	}
4759
4760         /// Returns true if this channel is fully shut down. True here implies that no further actions
4761         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4762         /// will be handled appropriately by the chain monitor.
4763         pub fn is_shutdown(&self) -> bool {
4764                 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
4765                         assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4766                         true
4767                 } else { false }
4768         }
4769
	/// Returns the channel's current [`ChannelUpdateStatus`].
	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}
4773
	/// Sets the channel's [`ChannelUpdateStatus`], bumping the update-time counter so any
	/// resulting channel_update gossip carries a fresh timestamp.
	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
4778
	/// Checks whether the funding transaction has sufficient confirmations at the given height
	/// and, if so, advances our state flags and returns a `channel_ready` message to send to
	/// the peer (or `None` if one isn't due, can't be sent yet, or was already handled).
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			// A non-positive count implies a reorg dropped the funding transaction back out of
			// the chain - forget the confirmation height so it can be re-detected later.
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
			self.context.channel_state |= ChannelState::OurChannelReady as u32;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state);
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				// With a monitor update in flight we can't send yet - remember to send
				// channel_ready once the monitor update completes.
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
4841
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							// Funding output checks out - record where it confirmed and derive
							// the real short channel id from (height, tx index, output index).
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							}
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				// Any confirmed transaction spending our funding output closes the channel.
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
4922
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Delegate with a signer present so channel_ready/announcement_sigs can be generated.
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}
4944
4945         fn do_best_block_updated<NS: Deref, L: Deref>(
4946                 &mut self, height: u32, highest_header_time: u32,
4947                 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
4948         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4949         where
4950                 NS::Target: NodeSigner,
4951                 L::Target: Logger
4952         {
4953                 let mut timed_out_htlcs = Vec::new();
4954                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
4955                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
4956                 // ~now.
4957                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
4958                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4959                         match htlc_update {
4960                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
4961                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
4962                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
4963                                                 false
4964                                         } else { true }
4965                                 },
4966                                 _ => true
4967                         }
4968                 });
4969
4970                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
4971
4972                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
4973                         let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
4974                                 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
4975                         } else { None };
4976                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
4977                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
4978                 }
4979
4980                 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
4981                 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
4982                    (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
4983                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4984                         if self.context.funding_tx_confirmation_height == 0 {
4985                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
4986                                 // zero if it has been reorged out, however in either case, our state flags
4987                                 // indicate we've already sent a channel_ready
4988                                 funding_tx_confirmations = 0;
4989                         }
4990
4991                         // If we've sent channel_ready (or have both sent and received channel_ready), and
4992                         // the funding transaction has become unconfirmed,
4993                         // close the channel and hope we can get the latest state on chain (because presumably
4994                         // the funding transaction is at least still in the mempool of most nodes).
4995                         //
4996                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
4997                         // 0-conf channel, but not doing so may lead to the
4998                         // `ChannelManager::short_to_chan_info` map  being inconsistent, so we currently have
4999                         // to.
5000                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5001                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5002                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5003                                 return Err(ClosureReason::ProcessingError { err: err_reason });
5004                         }
5005                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5006                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5007                         log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5008                         // If funding_tx_confirmed_in is unset, the channel must not be active
5009                         assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5010                         assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5011                         return Err(ClosureReason::FundingTimedOut);
5012                 }
5013
5014                 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5015                         self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5016                 } else { None };
5017                 Ok((None, timed_out_htlcs, announcement_sigs))
5018         }
5019
5020         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5021         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5022         /// before the channel has reached channel_ready and we can just wait for more blocks.
5023         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5024                 if self.context.funding_tx_confirmation_height != 0 {
5025                         // We handle the funding disconnection by calling best_block_updated with a height one
5026                         // below where our funding was connected, implying a reorg back to conf_height - 1.
5027                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
5028                         // We use the time field to bump the current time we set on channel updates if its
5029                         // larger. If we don't know that time has moved forward, we can just set it to the last
5030                         // time we saw and it will be ignored.
5031                         let best_time = self.context.update_time_counter;
5032                         match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5033                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5034                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5035                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5036                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5037                                         Ok(())
5038                                 },
5039                                 Err(e) => Err(e)
5040                         }
5041                 } else {
5042                         // We never learned about the funding confirmation anyway, just ignore
5043                         Ok(())
5044                 }
5045         }
5046
5047         // Methods to get unprompted messages to send to the remote end (or where we already returned
5048         // something in the handler for the message that prompted this message):
5049
5050         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5051         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5052         /// directions). Should be used for both broadcasted announcements and in response to an
5053         /// AnnouncementSignatures message from the remote peer.
5054         ///
5055         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5056         /// closing).
5057         ///
5058         /// This will only return ChannelError::Ignore upon failure.
5059         ///
5060         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5061         fn get_channel_announcement<NS: Deref>(
5062                 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5063         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5064                 if !self.context.config.announced_channel {
5065                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5066                 }
5067                 if !self.context.is_usable() {
5068                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5069                 }
5070
5071                 let short_channel_id = self.context.get_short_channel_id()
5072                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5073                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5074                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5075                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5076                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5077
5078                 let msg = msgs::UnsignedChannelAnnouncement {
5079                         features: channelmanager::provided_channel_features(&user_config),
5080                         chain_hash,
5081                         short_channel_id,
5082                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5083                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5084                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5085                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5086                         excess_data: Vec::new(),
5087                 };
5088
5089                 Ok(msg)
5090         }
5091
	/// Gets the announcement_signatures message we should send our peer for this channel, if we
	/// are ready to announce it. Returns `None` when any precondition fails; on success,
	/// transitions `announcement_sigs_state` to `MessageSent` so the message is only built once.
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// The funding transaction must be confirmed with at least six confirmations, i.e.
		// confirmation_height + 5 <= best_block_height (matching the "required six
		// confirmations" check in `announcement_signatures`).
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		// The channel must be live; NOTE(review): the exact conditions (channel_ready exchanged,
		// not shutting down, etc.) live in `is_usable` — confirm there.
		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		// Only ever send announcement_signatures once per channel.
		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				// Only `ChannelError::Ignore` can come back here; log and give up quietly.
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		// Sign the announcement with our node key first...
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		// ...then with our funding key via the channel signer.
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				// Only mark the message as sent once both signatures and the SCID are in hand;
				// any earlier `return None` leaves the state at NotSent so we can retry later.
				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			}
		}
	}
5157
5158         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5159         /// available.
5160         fn sign_channel_announcement<NS: Deref>(
5161                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5162         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5163                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5164                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5165                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5166                         let were_node_one = announcement.node_id_1 == our_node_key;
5167
5168                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5169                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5170                         match &self.context.holder_signer {
5171                                 ChannelSignerType::Ecdsa(ecdsa) => {
5172                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5173                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5174                                         Ok(msgs::ChannelAnnouncement {
5175                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5176                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5177                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5178                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5179                                                 contents: announcement,
5180                                         })
5181                                 }
5182                         }
5183                 } else {
5184                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5185                 }
5186         }
5187
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		// Both of our peer's signatures commit to the double-SHA256 of the serialized unsigned
		// announcement.
		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		// Verify their node signature against their node id, and their bitcoin signature against
		// their funding pubkey. Either failing is a protocol violation, so we return a Close
		// error rather than an Ignore.
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				 &announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		// Note that we store the (now-verified) signatures *before* the confirmation-depth check
		// below: even if we return the Ignore error, the stored signatures let us reconstruct the
		// announcement later (e.g. via `get_signed_channel_announcement`) once we have enough
		// confirmations ourselves.
		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
5218
5219         /// Gets a signed channel_announcement for this channel, if we previously received an
5220         /// announcement_signatures from our counterparty.
5221         pub fn get_signed_channel_announcement<NS: Deref>(
5222                 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5223         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5224                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5225                         return None;
5226                 }
5227                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5228                         Ok(res) => res,
5229                         Err(_) => return None,
5230                 };
5231                 match self.sign_channel_announcement(node_signer, announcement) {
5232                         Ok(res) => Some(res),
5233                         Err(_) => None,
5234                 }
5235         }
5236
	/// Builds the channel_reestablish message to send to our peer upon reconnection, including
	/// the data-loss-protect fields.
	///
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		// We must currently be marked disconnected, and must have progressed past the initial
		// counterparty commitment number.
		assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		// If our counterparty has revoked at least one commitment, include the last per-commitment
		// secret they revealed (data_loss_protect); otherwise send all-zeroes.
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0;32]
		};
		// Note we now expect a response from our peer to this reestablish.
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
5287
5288
5289         // Send stuff to our remote peers:
5290
5291         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5292         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5293         /// commitment update.
5294         ///
5295         /// `Err`s will only be [`ChannelError::Ignore`].
5296         pub fn queue_add_htlc<F: Deref, L: Deref>(
5297                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5298                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5299                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5300         ) -> Result<(), ChannelError>
5301         where F::Target: FeeEstimator, L::Target: Logger
5302         {
5303                 self
5304                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5305                                 skimmed_fee_msat, fee_estimator, logger)
5306                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5307                         .map_err(|err| {
5308                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5309                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5310                                 err
5311                         })
5312         }
5313
	/// Adds a pending outbound HTLC to this channel, note that you probably want
	/// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   awaiting ACK.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		// The channel must be fully established (ChannelReady) with neither side having begun
		// shutdown before new HTLCs may be added.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		// Sanity bound: no single HTLC can exceed the channel's total value.
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		// Enforce the current dynamic per-HTLC min/max limits (computed from fees/reserves by
		// `get_available_balances`).
		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen, if we're !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		// While awaiting an RAA or a monitor update we cannot commit to a new state, so the HTLC
		// must go into the holding cell regardless of what the caller asked for.
		let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			// Queue the HTLC for later; no update_add_htlc message is produced now.
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
			});
			return Ok(None);
		}

		// Record the HTLC as locally-announced pending-outbound state...
		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			skimmed_fee_msat,
		});

		// ...and build the matching update_add_htlc message, using the same htlc_id as the state
		// entry pushed above.
		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
		};
		// Only consume the HTLC id once both the state entry and the message are built.
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
5417
	/// Promotes pending HTLC and fee-update state to reflect a newly-sent `commitment_signed`
	/// and builds the [`ChannelMonitorUpdate`] carrying the latest counterparty commitment
	/// transaction info.
	///
	/// This mutates channel state (HTLC states, `pending_update_fee`, `resend_order`,
	/// `latest_monitor_update_id`, `channel_state`) and must therefore only be called when we
	/// are actually about to send a `commitment_signed` message.
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			// Inbound HTLCs waiting on this commitment move to waiting on the counterparty's
			// revoke_and_ack for it.
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				// An inbound fee update in this state can only exist on the non-initiating
				// (inbound) side, per the debug assertion below.
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		// If we need to resend messages after a reconnect, the RAA must come before our next
		// commitment_signed.
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		// Box the HTLC sources so the monitor update owns its copies independently of our state.
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}]
		};
		// We now await the counterparty's revoke_and_ack for this commitment.
		self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
		monitor_update
	}
5477
5478         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5479         -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5480         where L::Target: Logger
5481         {
5482                 let counterparty_keys = self.context.build_remote_transaction_keys();
5483                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5484                 let counterparty_commitment_tx = commitment_stats.tx;
5485
5486                 #[cfg(any(test, fuzzing))]
5487                 {
5488                         if !self.context.is_outbound() {
5489                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5490                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5491                                 if let Some(info) = projected_commit_tx_info {
5492                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5493                                         if info.total_pending_htlcs == total_pending_htlcs
5494                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5495                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5496                                                 && info.feerate == self.context.feerate_per_kw {
5497                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5498                                                         assert_eq!(actual_fee, info.fee);
5499                                                 }
5500                                 }
5501                         }
5502                 }
5503
5504                 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5505         }
5506
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	///
	/// Returns the `commitment_signed` message to send along with the new counterparty
	/// commitment txid and the HTLCs (with sources) included in that commitment. Does not
	/// mutate `self`.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		// Build the counterparty's next commitment transaction fresh; state is untouched.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					// Collect references to the included HTLCs for per-HTLC signature logging.
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					// A signer failure here is treated as fatal for the channel.
					let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					// The signer returns HTLC signatures in the same order as `htlcs_included`.
					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			}
		}
	}
5557
5558         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5559         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5560         ///
5561         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5562         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5563         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5564                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5565                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5566                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5567         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5568         where F::Target: FeeEstimator, L::Target: Logger
5569         {
5570                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5571                         onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
5572                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5573                 match send_res? {
5574                         Some(_) => {
5575                                 let monitor_update = self.build_commitment_no_status_check(logger);
5576                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5577                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
5578                         },
5579                         None => Ok(None)
5580                 }
5581         }
5582
5583         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5584         /// happened.
5585         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5586                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5587                         fee_base_msat: msg.contents.fee_base_msat,
5588                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5589                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
5590                 });
5591                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5592                 if did_change {
5593                         self.context.counterparty_forwarding_info = new_forwarding_info;
5594                 }
5595
5596                 Ok(did_change)
5597         }
5598
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
	///
	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
	/// [`ChannelMonitorUpdate`] will be returned).
	///
	/// Returns the `shutdown` message to send, an optional monitor update committing our
	/// shutdown script, the holding-cell HTLCs being dropped (to fail back), and, when the
	/// channel was never funded, an immediate [`ShutdownResult`].
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
	{
		// Shutdown may not begin while any outbound HTLC is still un-committed on both sides.
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
			if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
				return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
			}
			else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
			}
		}
		// Once a shutdown script is set (possibly committed upfront) it cannot be overridden.
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
		// script is set, we just force-close and call it a day.
		let mut chan_closed = false;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			chan_closed = true;
		}

		// Decide whether this call establishes our shutdown script (and thus needs a monitor
		// update to persist it).
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None if !chan_closed => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
			None => false,
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Never-funded channel: jump straight to fully-shutdown with no monitor update.
			let shutdown_result = ShutdownResult {
				monitor_update: None,
				dropped_outbound_htlcs: Vec::new(),
				unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
			};
			self.context.channel_state = ChannelState::ShutdownComplete as u32;
			Some(shutdown_result)
		} else {
			self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
			None
		};
		self.context.update_time_counter += 1;

		// Persist the newly-set shutdown script via a monitor update, if one was set above.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
	}
5710
5711         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5712                 self.context.holding_cell_htlc_updates.iter()
5713                         .flat_map(|htlc_update| {
5714                                 match htlc_update {
5715                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5716                                                 => Some((source, payment_hash)),
5717                                         _ => None,
5718                                 }
5719                         })
5720                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5721         }
5722 }
5723
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// Common channel state and configuration shared with funded channels.
	pub context: ChannelContext<SP>,
	// Context applicable only while the channel is unfunded (see `UnfundedChannelContext`).
	pub unfunded_context: UnfundedChannelContext,
}
5729
5730 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5731         pub fn new<ES: Deref, F: Deref>(
5732                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5733                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5734                 outbound_scid_alias: u64
5735         ) -> Result<OutboundV1Channel<SP>, APIError>
5736         where ES::Target: EntropySource,
5737               F::Target: FeeEstimator
5738         {
5739                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5740                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5741                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5742                 let pubkeys = holder_signer.pubkeys().clone();
5743
5744                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5745                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5746                 }
5747                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5748                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5749                 }
5750                 let channel_value_msat = channel_value_satoshis * 1000;
5751                 if push_msat > channel_value_msat {
5752                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5753                 }
5754                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5755                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5756                 }
5757                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5758                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5759                         // Protocol level safety check in place, although it should never happen because
5760                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5761                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5762                 }
5763
5764                 let channel_type = Self::get_initial_channel_type(&config, their_features);
5765                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5766
5767                 let (commitment_conf_target, anchor_outputs_value_msat)  = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5768                         (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5769                 } else {
5770                         (ConfirmationTarget::NonAnchorChannelFee, 0)
5771                 };
5772                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5773
5774                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5775                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5776                 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5777                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5778                 }
5779
5780                 let mut secp_ctx = Secp256k1::new();
5781                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5782
5783                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5784                         match signer_provider.get_shutdown_scriptpubkey() {
5785                                 Ok(scriptpubkey) => Some(scriptpubkey),
5786                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5787                         }
5788                 } else { None };
5789
5790                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5791                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
5792                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5793                         }
5794                 }
5795
5796                 let destination_script = match signer_provider.get_destination_script() {
5797                         Ok(script) => script,
5798                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5799                 };
5800
5801                 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
5802
5803                 Ok(Self {
5804                         context: ChannelContext {
5805                                 user_id,
5806
5807                                 config: LegacyChannelConfig {
5808                                         options: config.channel_config.clone(),
5809                                         announced_channel: config.channel_handshake_config.announced_channel,
5810                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5811                                 },
5812
5813                                 prev_config: None,
5814
5815                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5816
5817                                 channel_id: temporary_channel_id,
5818                                 temporary_channel_id: Some(temporary_channel_id),
5819                                 channel_state: ChannelState::OurInitSent as u32,
5820                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
5821                                 secp_ctx,
5822                                 channel_value_satoshis,
5823
5824                                 latest_monitor_update_id: 0,
5825
5826                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5827                                 shutdown_scriptpubkey,
5828                                 destination_script,
5829
5830                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5831                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5832                                 value_to_self_msat,
5833
5834                                 pending_inbound_htlcs: Vec::new(),
5835                                 pending_outbound_htlcs: Vec::new(),
5836                                 holding_cell_htlc_updates: Vec::new(),
5837                                 pending_update_fee: None,
5838                                 holding_cell_update_fee: None,
5839                                 next_holder_htlc_id: 0,
5840                                 next_counterparty_htlc_id: 0,
5841                                 update_time_counter: 1,
5842
5843                                 resend_order: RAACommitmentOrder::CommitmentFirst,
5844
5845                                 monitor_pending_channel_ready: false,
5846                                 monitor_pending_revoke_and_ack: false,
5847                                 monitor_pending_commitment_signed: false,
5848                                 monitor_pending_forwards: Vec::new(),
5849                                 monitor_pending_failures: Vec::new(),
5850                                 monitor_pending_finalized_fulfills: Vec::new(),
5851
5852                                 #[cfg(debug_assertions)]
5853                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5854                                 #[cfg(debug_assertions)]
5855                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5856
5857                                 last_sent_closing_fee: None,
5858                                 pending_counterparty_closing_signed: None,
5859                                 closing_fee_limits: None,
5860                                 target_closing_feerate_sats_per_kw: None,
5861
5862                                 funding_tx_confirmed_in: None,
5863                                 funding_tx_confirmation_height: 0,
5864                                 short_channel_id: None,
5865                                 channel_creation_height: current_chain_height,
5866
5867                                 feerate_per_kw: commitment_feerate,
5868                                 counterparty_dust_limit_satoshis: 0,
5869                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
5870                                 counterparty_max_htlc_value_in_flight_msat: 0,
5871                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
5872                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
5873                                 holder_selected_channel_reserve_satoshis,
5874                                 counterparty_htlc_minimum_msat: 0,
5875                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
5876                                 counterparty_max_accepted_htlcs: 0,
5877                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
5878                                 minimum_depth: None, // Filled in in accept_channel
5879
5880                                 counterparty_forwarding_info: None,
5881
5882                                 channel_transaction_parameters: ChannelTransactionParameters {
5883                                         holder_pubkeys: pubkeys,
5884                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
5885                                         is_outbound_from_holder: true,
5886                                         counterparty_parameters: None,
5887                                         funding_outpoint: None,
5888                                         channel_type_features: channel_type.clone()
5889                                 },
5890                                 funding_transaction: None,
5891                                 is_batch_funding: None,
5892
5893                                 counterparty_cur_commitment_point: None,
5894                                 counterparty_prev_commitment_point: None,
5895                                 counterparty_node_id,
5896
5897                                 counterparty_shutdown_scriptpubkey: None,
5898
5899                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
5900
5901                                 channel_update_status: ChannelUpdateStatus::Enabled,
5902                                 closing_signed_in_flight: false,
5903
5904                                 announcement_sigs: None,
5905
5906                                 #[cfg(any(test, fuzzing))]
5907                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
5908                                 #[cfg(any(test, fuzzing))]
5909                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
5910
5911                                 workaround_lnd_bug_4006: None,
5912                                 sent_message_awaiting_response: None,
5913
5914                                 latest_inbound_scid_alias: None,
5915                                 outbound_scid_alias,
5916
5917                                 channel_pending_event_emitted: false,
5918                                 channel_ready_event_emitted: false,
5919
5920                                 #[cfg(any(test, fuzzing))]
5921                                 historical_inbound_htlc_fulfills: HashSet::new(),
5922
5923                                 channel_type,
5924                                 channel_keys_id,
5925
5926                                 blocked_monitor_updates: Vec::new(),
5927                         },
5928                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
5929                 })
5930         }
5931
5932         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
5933         fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
5934                 let counterparty_keys = self.context.build_remote_transaction_keys();
5935                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5936                 match &self.context.holder_signer {
5937                         // TODO (taproot|arik): move match into calling method for Taproot
5938                         ChannelSignerType::Ecdsa(ecdsa) => {
5939                                 Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
5940                                         .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5941                         }
5942                 }
5943         }
5944
5945         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
5946         /// a funding_created message for the remote peer.
5947         /// Panics if called at some time other than immediately after initial handshake, if called twice,
5948         /// or if called on an inbound channel.
5949         /// Note that channel_id changes during this call!
5950         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
5951         /// If an Err is returned, it is a ChannelError::Close.
5952         pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
5953         -> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
5954                 if !self.context.is_outbound() {
5955                         panic!("Tried to create outbound funding_created message on an inbound channel!");
5956                 }
5957                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
5958                         panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
5959                 }
5960                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
5961                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
5962                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5963                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
5964                 }
5965
5966                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
5967                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
5968
5969                 let signature = match self.get_funding_created_signature(logger) {
5970                         Ok(res) => res,
5971                         Err(e) => {
5972                                 log_error!(logger, "Got bad signatures: {:?}!", e);
5973                                 self.context.channel_transaction_parameters.funding_outpoint = None;
5974                                 return Err((self, e));
5975                         }
5976                 };
5977
5978                 let temporary_channel_id = self.context.channel_id;
5979
5980                 // Now that we're past error-generating stuff, update our local state:
5981
5982                 self.context.channel_state = ChannelState::FundingCreated as u32;
5983                 self.context.channel_id = funding_txo.to_channel_id();
5984
5985                 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
5986                 // We can skip this if it is a zero-conf channel.
5987                 if funding_transaction.is_coin_base() &&
5988                         self.context.minimum_depth.unwrap_or(0) > 0 &&
5989                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5990                         self.context.minimum_depth = Some(COINBASE_MATURITY);
5991                 }
5992
5993                 self.context.funding_transaction = Some(funding_transaction);
5994                 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
5995
5996                 let channel = Channel {
5997                         context: self.context,
5998                 };
5999
6000                 Ok((channel, msgs::FundingCreated {
6001                         temporary_channel_id,
6002                         funding_txid: funding_txo.txid,
6003                         funding_output_index: funding_txo.index,
6004                         signature,
6005                         #[cfg(taproot)]
6006                         partial_signature_with_nonce: None,
6007                         #[cfg(taproot)]
6008                         next_local_nonce: None,
6009                 }))
6010         }
6011
6012         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6013                 // The default channel type (ie the first one we try) depends on whether the channel is
6014                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6015                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6016                 // with no other changes, and fall back to `only_static_remotekey`.
6017                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6018                 if !config.channel_handshake_config.announced_channel &&
6019                         config.channel_handshake_config.negotiate_scid_privacy &&
6020                         their_features.supports_scid_privacy() {
6021                         ret.set_scid_privacy_required();
6022                 }
6023
6024                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6025                 // set it now. If they don't understand it, we'll fall back to our default of
6026                 // `only_static_remotekey`.
6027                 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6028                         their_features.supports_anchors_zero_fee_htlc_tx() {
6029                         ret.set_anchors_zero_fee_htlc_tx_required();
6030                 }
6031
6032                 ret
6033         }
6034
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
	///
	/// Returns `Err(())` if there is nothing left to retry (we're already at the default
	/// `only_static_remote_key` type, the channel isn't outbound, or the handshake has moved past
	/// `OurInitSent`); otherwise downgrades the channel type by one step and returns the
	/// `open_channel` message to re-send.
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted one.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// whatever reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			// Without anchors, the commitment feerate must cover on-chain fees directly, so
			// re-fetch an appropriate non-anchor feerate.
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		// Keep the transaction-building parameters in sync with the (now downgraded) type.
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
6070
6071         pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6072                 if !self.context.is_outbound() {
6073                         panic!("Tried to open a channel for an inbound channel?");
6074                 }
6075                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6076                         panic!("Cannot generate an open_channel after we've moved forward");
6077                 }
6078
6079                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6080                         panic!("Tried to send an open_channel for a channel that has already advanced");
6081                 }
6082
6083                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6084                 let keys = self.context.get_holder_pubkeys();
6085
6086                 msgs::OpenChannel {
6087                         chain_hash,
6088                         temporary_channel_id: self.context.channel_id,
6089                         funding_satoshis: self.context.channel_value_satoshis,
6090                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6091                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6092                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6093                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6094                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6095                         feerate_per_kw: self.context.feerate_per_kw as u32,
6096                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6097                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6098                         funding_pubkey: keys.funding_pubkey,
6099                         revocation_basepoint: keys.revocation_basepoint,
6100                         payment_point: keys.payment_point,
6101                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6102                         htlc_basepoint: keys.htlc_basepoint,
6103                         first_per_commitment_point,
6104                         channel_flags: if self.context.config.announced_channel {1} else {0},
6105                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6106                                 Some(script) => script.clone().into_inner(),
6107                                 None => Builder::new().into_script(),
6108                         }),
6109                         channel_type: Some(self.context.channel_type.clone()),
6110                 }
6111         }
6112
	// Message handlers

	/// Handles an incoming `accept_channel` message from the counterparty during the outbound
	/// V1 channel-establishment handshake.
	///
	/// Validates every field of the message against protocol-mandated limits and against our
	/// configured [`ChannelHandshakeLimits`] (or a per-channel override, if set), then records
	/// the counterparty's parameters in our channel state and advances `channel_state`.
	///
	/// Returns a `ChannelError::Close` describing the offending field if any check fails.
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		// Use the per-channel handshake-limit override if one was set at open time.
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if self.context.channel_state != ChannelState::OurInitSent as u32 {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		// 21M BTC * 100M sat/BTC: a dust limit above the total supply is nonsensical.
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		// An htlc_minimum at or above the spendable channel value would make the channel useless.
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		// Our funds would be locked for to_self_delay blocks on force-close; cap how long we accept.
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		// The counterparty must echo back the channel type we proposed (or omit it entirely).
		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			// No explicit channel_type and no channel_type feature support: fall back to the
			// implied type derived from their init features, which must be the plain
			// static_remote_key type.
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		// All checks passed - record the counterparty's parameters.
		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		// If we trust our own funding (0-conf allowed), accept their minimum_depth as-is
		// (possibly 0); otherwise require at least one confirmation.
		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: msg.revocation_basepoint,
			payment_point: msg.payment_point,
			delayed_payment_basepoint: msg.delayed_payment_basepoint,
			htlc_basepoint: msg.htlc_basepoint
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		// Handshake complete: both init messages have now been exchanged.
		self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
6243 }
6244
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// Channel state shared with funded channels (keys, negotiated parameters, etc.).
	pub context: ChannelContext<SP>,
	// State used only while the channel is unfunded (carries `unfunded_channel_age_ticks`,
	// presumably used to time out stalled funding negotiations - TODO confirm with its users).
	pub unfunded_context: UnfundedChannelContext,
}
6250
6251 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6252         /// Creates a new channel from a remote sides' request for one.
6253         /// Assumes chain_hash has already been checked and corresponds with what we expect!
6254         pub fn new<ES: Deref, F: Deref, L: Deref>(
6255                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6256                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6257                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6258                 current_chain_height: u32, logger: &L, is_0conf: bool,
6259         ) -> Result<InboundV1Channel<SP>, ChannelError>
6260                 where ES::Target: EntropySource,
6261                           F::Target: FeeEstimator,
6262                           L::Target: Logger,
6263         {
6264                 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6265
6266                 // First check the channel type is known, failing before we do anything else if we don't
6267                 // support this channel type.
6268                 let channel_type = if let Some(channel_type) = &msg.channel_type {
6269                         if channel_type.supports_any_optional_bits() {
6270                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6271                         }
6272
6273                         // We only support the channel types defined by the `ChannelManager` in
6274                         // `provided_channel_type_features`. The channel type must always support
6275                         // `static_remote_key`.
6276                         if !channel_type.requires_static_remote_key() {
6277                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6278                         }
6279                         // Make sure we support all of the features behind the channel type.
6280                         if !channel_type.is_subset(our_supported_features) {
6281                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6282                         }
6283                         if channel_type.requires_scid_privacy() && announced_channel {
6284                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6285                         }
6286                         channel_type.clone()
6287                 } else {
6288                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6289                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6290                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6291                         }
6292                         channel_type
6293                 };
6294
6295                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6296                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6297                 let pubkeys = holder_signer.pubkeys().clone();
6298                 let counterparty_pubkeys = ChannelPublicKeys {
6299                         funding_pubkey: msg.funding_pubkey,
6300                         revocation_basepoint: msg.revocation_basepoint,
6301                         payment_point: msg.payment_point,
6302                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
6303                         htlc_basepoint: msg.htlc_basepoint
6304                 };
6305
6306                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6307                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6308                 }
6309
6310                 // Check sanity of message fields:
6311                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6312                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6313                 }
6314                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6315                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6316                 }
6317                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6318                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6319                 }
6320                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6321                 if msg.push_msat > full_channel_value_msat {
6322                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6323                 }
6324                 if msg.dust_limit_satoshis > msg.funding_satoshis {
6325                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6326                 }
6327                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6328                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6329                 }
6330                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6331
6332                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6333                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6334                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6335                 }
6336                 if msg.max_accepted_htlcs < 1 {
6337                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6338                 }
6339                 if msg.max_accepted_htlcs > MAX_HTLCS {
6340                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6341                 }
6342
6343                 // Now check against optional parameters as set by config...
6344                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6345                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6346                 }
6347                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6348                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
6349                 }
6350                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6351                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6352                 }
6353                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6354                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6355                 }
6356                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6357                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6358                 }
6359                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6360                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6361                 }
6362                 if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
6363                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6364                 }
6365
6366                 // Convert things into internal flags and prep our state:
6367
6368                 if config.channel_handshake_limits.force_announced_channel_preference {
6369                         if config.channel_handshake_config.announced_channel != announced_channel {
6370                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6371                         }
6372                 }
6373
6374                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6375                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6376                         // Protocol level safety check in place, although it should never happen because
6377                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6378                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6379                 }
6380                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6381                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6382                 }
6383                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6384                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6385                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6386                 }
6387                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6388                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6389                 }
6390
6391                 // check if the funder's amount for the initial commitment tx is sufficient
6392                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6393                 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6394                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6395                 } else {
6396                         0
6397                 };
6398                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6399                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6400                 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6401                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6402                 }
6403
6404                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6405                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6406                 // want to push much to us), our counterparty should always have more than our reserve.
6407                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6408                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6409                 }
6410
6411                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6412                         match &msg.shutdown_scriptpubkey {
6413                                 &Some(ref script) => {
6414                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6415                                         if script.len() == 0 {
6416                                                 None
6417                                         } else {
6418                                                 if !script::is_bolt2_compliant(&script, their_features) {
6419                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6420                                                 }
6421                                                 Some(script.clone())
6422                                         }
6423                                 },
6424                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6425                                 &None => {
6426                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6427                                 }
6428                         }
6429                 } else { None };
6430
6431                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6432                         match signer_provider.get_shutdown_scriptpubkey() {
6433                                 Ok(scriptpubkey) => Some(scriptpubkey),
6434                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6435                         }
6436                 } else { None };
6437
6438                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6439                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6440                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6441                         }
6442                 }
6443
6444                 let destination_script = match signer_provider.get_destination_script() {
6445                         Ok(script) => script,
6446                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6447                 };
6448
6449                 let mut secp_ctx = Secp256k1::new();
6450                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6451
6452                 let minimum_depth = if is_0conf {
6453                         Some(0)
6454                 } else {
6455                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6456                 };
6457
6458                 let chan = Self {
6459                         context: ChannelContext {
6460                                 user_id,
6461
6462                                 config: LegacyChannelConfig {
6463                                         options: config.channel_config.clone(),
6464                                         announced_channel,
6465                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6466                                 },
6467
6468                                 prev_config: None,
6469
6470                                 inbound_handshake_limits_override: None,
6471
6472                                 temporary_channel_id: Some(msg.temporary_channel_id),
6473                                 channel_id: msg.temporary_channel_id,
6474                                 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6475                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6476                                 secp_ctx,
6477
6478                                 latest_monitor_update_id: 0,
6479
6480                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6481                                 shutdown_scriptpubkey,
6482                                 destination_script,
6483
6484                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6485                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6486                                 value_to_self_msat: msg.push_msat,
6487
6488                                 pending_inbound_htlcs: Vec::new(),
6489                                 pending_outbound_htlcs: Vec::new(),
6490                                 holding_cell_htlc_updates: Vec::new(),
6491                                 pending_update_fee: None,
6492                                 holding_cell_update_fee: None,
6493                                 next_holder_htlc_id: 0,
6494                                 next_counterparty_htlc_id: 0,
6495                                 update_time_counter: 1,
6496
6497                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6498
6499                                 monitor_pending_channel_ready: false,
6500                                 monitor_pending_revoke_and_ack: false,
6501                                 monitor_pending_commitment_signed: false,
6502                                 monitor_pending_forwards: Vec::new(),
6503                                 monitor_pending_failures: Vec::new(),
6504                                 monitor_pending_finalized_fulfills: Vec::new(),
6505
6506                                 #[cfg(debug_assertions)]
6507                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6508                                 #[cfg(debug_assertions)]
6509                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6510
6511                                 last_sent_closing_fee: None,
6512                                 pending_counterparty_closing_signed: None,
6513                                 closing_fee_limits: None,
6514                                 target_closing_feerate_sats_per_kw: None,
6515
6516                                 funding_tx_confirmed_in: None,
6517                                 funding_tx_confirmation_height: 0,
6518                                 short_channel_id: None,
6519                                 channel_creation_height: current_chain_height,
6520
6521                                 feerate_per_kw: msg.feerate_per_kw,
6522                                 channel_value_satoshis: msg.funding_satoshis,
6523                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6524                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6525                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6526                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6527                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6528                                 holder_selected_channel_reserve_satoshis,
6529                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6530                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6531                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6532                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6533                                 minimum_depth,
6534
6535                                 counterparty_forwarding_info: None,
6536
6537                                 channel_transaction_parameters: ChannelTransactionParameters {
6538                                         holder_pubkeys: pubkeys,
6539                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6540                                         is_outbound_from_holder: false,
6541                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6542                                                 selected_contest_delay: msg.to_self_delay,
6543                                                 pubkeys: counterparty_pubkeys,
6544                                         }),
6545                                         funding_outpoint: None,
6546                                         channel_type_features: channel_type.clone()
6547                                 },
6548                                 funding_transaction: None,
6549                                 is_batch_funding: None,
6550
6551                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6552                                 counterparty_prev_commitment_point: None,
6553                                 counterparty_node_id,
6554
6555                                 counterparty_shutdown_scriptpubkey,
6556
6557                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6558
6559                                 channel_update_status: ChannelUpdateStatus::Enabled,
6560                                 closing_signed_in_flight: false,
6561
6562                                 announcement_sigs: None,
6563
6564                                 #[cfg(any(test, fuzzing))]
6565                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6566                                 #[cfg(any(test, fuzzing))]
6567                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6568
6569                                 workaround_lnd_bug_4006: None,
6570                                 sent_message_awaiting_response: None,
6571
6572                                 latest_inbound_scid_alias: None,
6573                                 outbound_scid_alias: 0,
6574
6575                                 channel_pending_event_emitted: false,
6576                                 channel_ready_event_emitted: false,
6577
6578                                 #[cfg(any(test, fuzzing))]
6579                                 historical_inbound_htlc_fulfills: HashSet::new(),
6580
6581                                 channel_type,
6582                                 channel_keys_id,
6583
6584                                 blocked_monitor_updates: Vec::new(),
6585                         },
6586                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6587                 };
6588
6589                 Ok(chan)
6590         }
6591
6592         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6593         /// should be sent back to the counterparty node.
6594         ///
6595         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6596         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6597                 if self.context.is_outbound() {
6598                         panic!("Tried to send accept_channel for an outbound channel?");
6599                 }
6600                 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6601                         panic!("Tried to send accept_channel after channel had moved forward");
6602                 }
6603                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6604                         panic!("Tried to send an accept_channel for a channel that has already advanced");
6605                 }
6606
6607                 self.generate_accept_channel_message()
6608         }
6609
6610         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6611         /// inbound channel. If the intention is to accept an inbound channel, use
6612         /// [`InboundV1Channel::accept_inbound_channel`] instead.
6613         ///
6614         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6615         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6616                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6617                 let keys = self.context.get_holder_pubkeys();
6618
6619                 msgs::AcceptChannel {
6620                         temporary_channel_id: self.context.channel_id,
6621                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6622                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6623                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6624                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6625                         minimum_depth: self.context.minimum_depth.unwrap(),
6626                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6627                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6628                         funding_pubkey: keys.funding_pubkey,
6629                         revocation_basepoint: keys.revocation_basepoint,
6630                         payment_point: keys.payment_point,
6631                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6632                         htlc_basepoint: keys.htlc_basepoint,
6633                         first_per_commitment_point,
6634                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6635                                 Some(script) => script.clone().into_inner(),
6636                                 None => Builder::new().into_script(),
6637                         }),
6638                         channel_type: Some(self.context.channel_type.clone()),
6639                         #[cfg(taproot)]
6640                         next_local_nonce: None,
6641                 }
6642         }
6643
6644         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6645         /// inbound channel without accepting it.
6646         ///
6647         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6648         #[cfg(test)]
6649         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6650                 self.generate_accept_channel_message()
6651         }
6652
	/// Builds both initial commitment transactions and cross-signs them in response to a
	/// `funding_created` message.
	///
	/// First verifies the counterparty's signature `sig` over *our* (holder) initial commitment
	/// transaction against their funding pubkey, then asks our signer to sign *their*
	/// (counterparty) initial commitment transaction.
	///
	/// Returns `(counterparty_initial_commitment_tx, holder_initial_commitment_tx, our_signature)`
	/// on success, or a `ChannelError::Close` if the peer's signature is invalid or our signer
	/// fails to produce one.
	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		// Scope the trusted-transaction borrow: we only need it long enough to compute the
		// sighash and check the counterparty's signature over our commitment transaction.
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign the holder commitment transaction...
			log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
				log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
				encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
				encode::serialize_hex(&funding_script), &self.context.channel_id());
			// Reject the channel (ChannelError::Close via secp_check!) rather than proceed with a
			// commitment transaction we could never broadcast.
			secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
		}

		// Now build the counterparty's view of the initial commitment so we can sign it for them.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;

		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		match &self.context.holder_signer {
			// TODO (arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				// No HTLCs exist yet at funding time, hence the empty Vec of HTLC signatures.
				let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;

				// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
				Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
			}
		}
	}
6689
6690         pub fn funding_created<L: Deref>(
6691                 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6692         ) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
6693         where
6694                 L::Target: Logger
6695         {
6696                 if self.context.is_outbound() {
6697                         return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6698                 }
6699                 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6700                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6701                         // remember the channel, so it's safe to just send an error_message here and drop the
6702                         // channel.
6703                         return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6704                 }
6705                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6706                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6707                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6708                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6709                 }
6710
6711                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6712                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6713                 // This is an externally observable change before we finish all our checks.  In particular
6714                 // funding_created_signature may fail.
6715                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6716
6717                 let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
6718                         Ok(res) => res,
6719                         Err(ChannelError::Close(e)) => {
6720                                 self.context.channel_transaction_parameters.funding_outpoint = None;
6721                                 return Err((self, ChannelError::Close(e)));
6722                         },
6723                         Err(e) => {
6724                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
6725                                 // to make sure we don't continue with an inconsistent state.
6726                                 panic!("unexpected error type from funding_created_signature {:?}", e);
6727                         }
6728                 };
6729
6730                 let holder_commitment_tx = HolderCommitmentTransaction::new(
6731                         initial_commitment_tx,
6732                         msg.signature,
6733                         Vec::new(),
6734                         &self.context.get_holder_pubkeys().funding_pubkey,
6735                         self.context.counterparty_funding_pubkey()
6736                 );
6737
6738                 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6739                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6740                 }
6741
6742                 // Now that we're past error-generating stuff, update our local state:
6743
6744                 let funding_redeemscript = self.context.get_funding_redeemscript();
6745                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6746                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6747                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6748                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6749                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6750                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6751                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
6752                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6753                                                           &self.context.channel_transaction_parameters,
6754                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
6755                                                           obscure_factor,
6756                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
6757
6758                 channel_monitor.provide_initial_counterparty_commitment_tx(
6759                         counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6760                         self.context.cur_counterparty_commitment_transaction_number,
6761                         self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6762                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6763                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6764
6765                 self.context.channel_state = ChannelState::FundingSent as u32;
6766                 self.context.channel_id = funding_txo.to_channel_id();
6767                 self.context.cur_counterparty_commitment_transaction_number -= 1;
6768                 self.context.cur_holder_commitment_transaction_number -= 1;
6769
6770                 log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
6771
6772                 // Promote the channel to a full-fledged one now that we have updated the state and have a
6773                 // `ChannelMonitor`.
6774                 let mut channel = Channel {
6775                         context: self.context,
6776                 };
6777                 let channel_id = channel.context.channel_id.clone();
6778                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6779                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6780
6781                 Ok((channel, msgs::FundingSigned {
6782                         channel_id,
6783                         signature,
6784                         #[cfg(taproot)]
6785                         partial_signature_with_nonce: None,
6786                 }, channel_monitor))
6787         }
6788 }
6789
// Serialization format version constants for `Channel`.
// `Channel::write` below currently passes `MIN_SERIALIZATION_VERSION` as both arguments to
// `write_ver_prefix!`, so freshly-written channels remain readable by deserializers that only
// understand version 2.
// NOTE(review): `SERIALIZATION_VERSION` (3) is handed to `read_ver_prefix!` on the read side;
// presumably it is the newest version the reader accepts — confirm against the macro
// definition in `util::ser`.
const SERIALIZATION_VERSION: u8 = 3;
const MIN_SERIALIZATION_VERSION: u8 = 2;
6792
// Serialization mapping for `InboundHTLCRemovalReason`: each variant is tagged with the listed
// type number (0 = FailRelay, 1 = FailMalformed, 2 = Fulfill). The exact wire encoding is
// produced by `impl_writeable_tlv_based_enum!`; changing a tag here would break compatibility
// with previously-serialized channels, so these numbers must never be reused or reordered.
impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);
6798
6799 impl Writeable for ChannelUpdateStatus {
6800         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6801                 // We only care about writing out the current state as it was announced, ie only either
6802                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6803                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6804                 match self {
6805                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6806                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6807                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6808                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6809                 }
6810                 Ok(())
6811         }
6812 }
6813
6814 impl Readable for ChannelUpdateStatus {
6815         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6816                 Ok(match <u8 as Readable>::read(reader)? {
6817                         0 => ChannelUpdateStatus::Enabled,
6818                         1 => ChannelUpdateStatus::Disabled,
6819                         _ => return Err(DecodeError::InvalidValue),
6820                 })
6821         }
6822 }
6823
6824 impl Writeable for AnnouncementSigsState {
6825         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6826                 // We only care about writing out the current state as if we had just disconnected, at
6827                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6828                 match self {
6829                         AnnouncementSigsState::NotSent => 0u8.write(writer),
6830                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
6831                         AnnouncementSigsState::Committed => 0u8.write(writer),
6832                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6833                 }
6834         }
6835 }
6836
6837 impl Readable for AnnouncementSigsState {
6838         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6839                 Ok(match <u8 as Readable>::read(reader)? {
6840                         0 => AnnouncementSigsState::NotSent,
6841                         1 => AnnouncementSigsState::PeerReceived,
6842                         _ => return Err(DecodeError::InvalidValue),
6843                 })
6844         }
6845 }
6846
impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
	/// Serializes the full channel state.
	///
	/// Layout: a version prefix, then a fixed sequence of legacy fields whose *order and
	/// encoding must never change* (older deserializers read them positionally), then a TLV
	/// stream at the end for everything added since 0.0.99. New fields must go in the TLV
	/// stream; nothing may be inserted, removed, or reordered in the fixed section.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
		// called.

		// We deliberately write MIN_SERIALIZATION_VERSION (not SERIALIZATION_VERSION) as the
		// version, keeping the output readable by deserializers that only understand v2.
		write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
		// the low bytes now and the optional high bytes later.
		let user_id_low = self.context.user_id as u64;
		user_id_low.write(writer)?;

		// Version 1 deserializers expected to read parts of the config object here. Version 2
		// deserializers (0.0.99) now read config through TLVs, and as we now require them for
		// `minimum_depth` we simply write dummy values here.
		writer.write_all(&[0; 8])?;

		self.context.channel_id.write(writer)?;
		// Persist the state with PeerDisconnected set: on reload we will always be starting
		// from a disconnected peer, matching the "as if just disconnected" note above.
		(self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
		self.context.channel_value_satoshis.write(writer)?;

		self.context.latest_monitor_update_id.write(writer)?;

		// Serialize the signer into a temporary buffer first so we can length-prefix it.
		let mut key_data = VecWriter(Vec::new());
		// TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
		self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
		// deserialized from that format.
		match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
			Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
			// All-zero placeholder where a compressed pubkey would otherwise go.
			None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
		}
		self.context.destination_script.write(writer)?;

		self.context.cur_holder_commitment_transaction_number.write(writer)?;
		self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
		self.context.value_to_self_msat.write(writer)?;

		// Inbound HTLCs still in RemoteAnnounced were never committed to in a
		// commitment_signed, so we drop them entirely (the counterparty will re-announce
		// them on reconnect). Count them first so the length prefix below is accurate.
		let mut dropped_inbound_htlcs = 0;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				dropped_inbound_htlcs += 1;
			}
		}
		(self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
				continue; // Drop
			}
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			// One discriminant byte per state, followed by the state's payload (if any).
			// Discriminant 0 is never written: RemoteAnnounced was filtered out above.
			match &htlc.state {
				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
					1u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
					2u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::Committed => {
					3u8.write(writer)?;
				},
				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
					4u8.write(writer)?;
					removal_reason.write(writer)?;
				},
			}
		}

		// `preimages` collects the claim preimages of outbound HTLCs being removed; they are
		// written later in the TLV stream (type 15), aligned with the outbound HTLC order.
		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
		// Sparse encoding for per-HTLC skimmed fees (TLV type 35): left empty if no HTLC has
		// one; otherwise padded with leading `None`s so indices line up with the HTLC order.
		let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();

		(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
		for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			htlc.source.write(writer)?;
			match &htlc.state {
				&OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
					0u8.write(writer)?;
					onion_packet.write(writer)?;
				},
				&OutboundHTLCState::Committed => {
					1u8.write(writer)?;
				},
				&OutboundHTLCState::RemoteRemoved(_) => {
					// Treat this as a Committed because we haven't received the CS - they'll
					// resend the claim/fail on reconnect as well as (hopefully) the missing CS.
					1u8.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
					3u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
				&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
					4u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
			}
			// Maintain the sparse skimmed-fee vec: backfill `None`s up to the first HTLC that
			// actually has a fee, then track every subsequent HTLC explicitly.
			if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
				if pending_outbound_skimmed_fees.is_empty() {
					for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
				}
				pending_outbound_skimmed_fees.push(Some(skimmed_fee));
			} else if !pending_outbound_skimmed_fees.is_empty() {
				pending_outbound_skimmed_fees.push(None);
			}
		}

		// Same sparse encoding as above, for holding-cell AddHTLC updates (TLV type 37).
		let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
		for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
			// One discriminant byte per update kind, then that kind's fields in order.
			match update {
				&HTLCUpdateAwaitingACK::AddHTLC {
					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
					skimmed_fee_msat,
				} => {
					0u8.write(writer)?;
					amount_msat.write(writer)?;
					cltv_expiry.write(writer)?;
					payment_hash.write(writer)?;
					source.write(writer)?;
					onion_routing_packet.write(writer)?;

					if let Some(skimmed_fee) = skimmed_fee_msat {
						if holding_cell_skimmed_fees.is_empty() {
							for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
						}
						holding_cell_skimmed_fees.push(Some(skimmed_fee));
					} else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
					1u8.write(writer)?;
					payment_preimage.write(writer)?;
					htlc_id.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					err_packet.write(writer)?;
				}
			}
		}

		match self.context.resend_order {
			RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
			RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
		}

		self.context.monitor_pending_channel_ready.write(writer)?;
		self.context.monitor_pending_revoke_and_ack.write(writer)?;
		self.context.monitor_pending_commitment_signed.write(writer)?;

		(self.context.monitor_pending_forwards.len() as u64).write(writer)?;
		for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
			pending_forward.write(writer)?;
			htlc_id.write(writer)?;
		}

		(self.context.monitor_pending_failures.len() as u64).write(writer)?;
		for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
			htlc_source.write(writer)?;
			payment_hash.write(writer)?;
			fail_reason.write(writer)?;
		}

		// Outbound channels always persist any pending feerate update (we sent it); inbound
		// channels only persist one that made it into a commitment_signed.
		if self.context.is_outbound() {
			self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
		} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
			Some(feerate).write(writer)?;
		} else {
			// As for inbound HTLCs, if the update was only announced and never committed in a
			// commitment_signed, drop it.
			None::<u32>.write(writer)?;
		}
		self.context.holding_cell_update_fee.write(writer)?;

		self.context.next_holder_htlc_id.write(writer)?;
		// Roll back the counterparty HTLC ID counter by the RemoteAnnounced HTLCs we dropped
		// above, so IDs stay consistent when they are re-announced after reload.
		(self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
		self.context.update_time_counter.write(writer)?;
		self.context.feerate_per_kw.write(writer)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		0u8.write(writer)?;

		self.context.funding_tx_confirmed_in.write(writer)?;
		self.context.funding_tx_confirmation_height.write(writer)?;
		self.context.short_channel_id.write(writer)?;

		self.context.counterparty_dust_limit_satoshis.write(writer)?;
		self.context.holder_dust_limit_satoshis.write(writer)?;
		self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

		self.context.counterparty_htlc_minimum_msat.write(writer)?;
		self.context.holder_htlc_minimum_msat.write(writer)?;
		self.context.counterparty_max_accepted_htlcs.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.minimum_depth.unwrap_or(0).write(writer)?;

		match &self.context.counterparty_forwarding_info {
			Some(info) => {
				1u8.write(writer)?;
				info.fee_base_msat.write(writer)?;
				info.fee_proportional_millionths.write(writer)?;
				info.cltv_expiry_delta.write(writer)?;
			},
			None => 0u8.write(writer)?
		}

		self.context.channel_transaction_parameters.write(writer)?;
		self.context.funding_transaction.write(writer)?;

		self.context.counterparty_cur_commitment_point.write(writer)?;
		self.context.counterparty_prev_commitment_point.write(writer)?;
		self.context.counterparty_node_id.write(writer)?;

		self.context.counterparty_shutdown_scriptpubkey.write(writer)?;

		self.context.commitment_secrets.write(writer)?;

		self.context.channel_update_status.write(writer)?;

		// Test/fuzz-only bookkeeping; these cfg-gated writes mean test builds produce a
		// different (incompatible) serialization than release builds.
		#[cfg(any(test, fuzzing))]
		(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
		#[cfg(any(test, fuzzing))]
		for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
			htlc.write(writer)?;
		}

		// If the channel type is something other than only-static-remote-key, then we need to have
		// older clients fail to deserialize this channel at all. If the type is
		// only-static-remote-key, we simply consider it "default" and don't write the channel type
		// out at all.
		let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
			Some(&self.context.channel_type) } else { None };

		// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value then 10%, which older versions of LDK used
		// to set it to before the percentage was made configurable.
		let serialized_holder_selected_reserve =
			if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
			{ Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };

		let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
		old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
		let serialized_holder_htlc_max_in_flight =
			if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
			{ Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };

		let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
		let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
		// we write the high bytes as an option here.
		let user_id_high_opt = Some((self.context.user_id >> 64) as u64);

		// Elide the value when it matches the old hard-coded default, for compatibility.
		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };

		// TLV stream: odd types may be ignored by old readers, even types (2, 4, 6, 8, 28, 38)
		// force old readers that don't understand them to fail to deserialize.
		write_tlv_fields!(writer, {
			(0, self.context.announcement_sigs, option),
			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
			// default value instead of being Option<>al. Thus, to maintain compatibility we write
			// them twice, once with their original default values above, and once as an option
			// here. On the read side, old versions will simply ignore the odd-type entries here,
			// and new versions map the default values to None and allow the TLV entries here to
			// override that.
			(1, self.context.minimum_depth, option),
			(2, chan_type, option),
			(3, self.context.counterparty_selected_channel_reserve_satoshis, option),
			(4, serialized_holder_selected_reserve, option),
			(5, self.context.config, required),
			(6, serialized_holder_htlc_max_in_flight, option),
			(7, self.context.shutdown_scriptpubkey, option),
			(8, self.context.blocked_monitor_updates, optional_vec),
			(9, self.context.target_closing_feerate_sats_per_kw, option),
			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
			(13, self.context.channel_creation_height, required),
			(15, preimages, required_vec),
			(17, self.context.announcement_sigs_state, required),
			(19, self.context.latest_inbound_scid_alias, option),
			(21, self.context.outbound_scid_alias, required),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, self.context.channel_keys_id, required),
			(28, holder_max_accepted_htlcs, option),
			(29, self.context.temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees, optional_vec),
			(37, holding_cell_skimmed_fees, optional_vec),
			(38, self.context.is_batch_funding, option),
		});

		Ok(())
	}
}
7171
// Upper bound on any single up-front `Vec::with_capacity` allocation during deserialization
// (see the signer-key read loop below), so a corrupt length prefix can't trigger a huge
// allocation; data beyond this cap is read and grown incrementally instead.
const MAX_ALLOC_SIZE: usize = 64*1024;
7173 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7174                 where
7175                         ES::Target: EntropySource,
7176                         SP::Target: SignerProvider
7177 {
7178         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7179                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7180                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7181
7182                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7183                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7184                 // the low bytes now and the high bytes later.
7185                 let user_id_low: u64 = Readable::read(reader)?;
7186
7187                 let mut config = Some(LegacyChannelConfig::default());
7188                 if ver == 1 {
7189                         // Read the old serialization of the ChannelConfig from version 0.0.98.
7190                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7191                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7192                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7193                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7194                 } else {
7195                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7196                         let mut _val: u64 = Readable::read(reader)?;
7197                 }
7198
7199                 let channel_id = Readable::read(reader)?;
7200                 let channel_state = Readable::read(reader)?;
7201                 let channel_value_satoshis = Readable::read(reader)?;
7202
7203                 let latest_monitor_update_id = Readable::read(reader)?;
7204
7205                 let mut keys_data = None;
7206                 if ver <= 2 {
7207                         // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7208                         // the `channel_keys_id` TLV is present below.
7209                         let keys_len: u32 = Readable::read(reader)?;
7210                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7211                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
7212                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7213                                 let mut data = [0; 1024];
7214                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7215                                 reader.read_exact(read_slice)?;
7216                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7217                         }
7218                 }
7219
7220                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7221                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7222                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7223                         Err(_) => None,
7224                 };
7225                 let destination_script = Readable::read(reader)?;
7226
7227                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7228                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7229                 let value_to_self_msat = Readable::read(reader)?;
7230
7231                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7232
7233                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7234                 for _ in 0..pending_inbound_htlc_count {
7235                         pending_inbound_htlcs.push(InboundHTLCOutput {
7236                                 htlc_id: Readable::read(reader)?,
7237                                 amount_msat: Readable::read(reader)?,
7238                                 cltv_expiry: Readable::read(reader)?,
7239                                 payment_hash: Readable::read(reader)?,
7240                                 state: match <u8 as Readable>::read(reader)? {
7241                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7242                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7243                                         3 => InboundHTLCState::Committed,
7244                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7245                                         _ => return Err(DecodeError::InvalidValue),
7246                                 },
7247                         });
7248                 }
7249
7250                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7251                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7252                 for _ in 0..pending_outbound_htlc_count {
7253                         pending_outbound_htlcs.push(OutboundHTLCOutput {
7254                                 htlc_id: Readable::read(reader)?,
7255                                 amount_msat: Readable::read(reader)?,
7256                                 cltv_expiry: Readable::read(reader)?,
7257                                 payment_hash: Readable::read(reader)?,
7258                                 source: Readable::read(reader)?,
7259                                 state: match <u8 as Readable>::read(reader)? {
7260                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7261                                         1 => OutboundHTLCState::Committed,
7262                                         2 => {
7263                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7264                                                 OutboundHTLCState::RemoteRemoved(option.into())
7265                                         },
7266                                         3 => {
7267                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7268                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7269                                         },
7270                                         4 => {
7271                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7272                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7273                                         },
7274                                         _ => return Err(DecodeError::InvalidValue),
7275                                 },
7276                                 skimmed_fee_msat: None,
7277                         });
7278                 }
7279
7280                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7281                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7282                 for _ in 0..holding_cell_htlc_update_count {
7283                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7284                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
7285                                         amount_msat: Readable::read(reader)?,
7286                                         cltv_expiry: Readable::read(reader)?,
7287                                         payment_hash: Readable::read(reader)?,
7288                                         source: Readable::read(reader)?,
7289                                         onion_routing_packet: Readable::read(reader)?,
7290                                         skimmed_fee_msat: None,
7291                                 },
7292                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7293                                         payment_preimage: Readable::read(reader)?,
7294                                         htlc_id: Readable::read(reader)?,
7295                                 },
7296                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
7297                                         htlc_id: Readable::read(reader)?,
7298                                         err_packet: Readable::read(reader)?,
7299                                 },
7300                                 _ => return Err(DecodeError::InvalidValue),
7301                         });
7302                 }
7303
7304                 let resend_order = match <u8 as Readable>::read(reader)? {
7305                         0 => RAACommitmentOrder::CommitmentFirst,
7306                         1 => RAACommitmentOrder::RevokeAndACKFirst,
7307                         _ => return Err(DecodeError::InvalidValue),
7308                 };
7309
7310                 let monitor_pending_channel_ready = Readable::read(reader)?;
7311                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7312                 let monitor_pending_commitment_signed = Readable::read(reader)?;
7313
7314                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7315                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7316                 for _ in 0..monitor_pending_forwards_count {
7317                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7318                 }
7319
7320                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7321                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7322                 for _ in 0..monitor_pending_failures_count {
7323                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7324                 }
7325
7326                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7327
7328                 let holding_cell_update_fee = Readable::read(reader)?;
7329
7330                 let next_holder_htlc_id = Readable::read(reader)?;
7331                 let next_counterparty_htlc_id = Readable::read(reader)?;
7332                 let update_time_counter = Readable::read(reader)?;
7333                 let feerate_per_kw = Readable::read(reader)?;
7334
7335                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7336                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7337                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7338                 // consider the stale state on reload.
7339                 match <u8 as Readable>::read(reader)? {
7340                         0 => {},
7341                         1 => {
7342                                 let _: u32 = Readable::read(reader)?;
7343                                 let _: u64 = Readable::read(reader)?;
7344                                 let _: Signature = Readable::read(reader)?;
7345                         },
7346                         _ => return Err(DecodeError::InvalidValue),
7347                 }
7348
7349                 let funding_tx_confirmed_in = Readable::read(reader)?;
7350                 let funding_tx_confirmation_height = Readable::read(reader)?;
7351                 let short_channel_id = Readable::read(reader)?;
7352
7353                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7354                 let holder_dust_limit_satoshis = Readable::read(reader)?;
7355                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7356                 let mut counterparty_selected_channel_reserve_satoshis = None;
7357                 if ver == 1 {
7358                         // Read the old serialization from version 0.0.98.
7359                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7360                 } else {
7361                         // Read the 8 bytes of backwards-compatibility data.
7362                         let _dummy: u64 = Readable::read(reader)?;
7363                 }
7364                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7365                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7366                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7367
7368                 let mut minimum_depth = None;
7369                 if ver == 1 {
7370                         // Read the old serialization from version 0.0.98.
7371                         minimum_depth = Some(Readable::read(reader)?);
7372                 } else {
7373                         // Read the 4 bytes of backwards-compatibility data.
7374                         let _dummy: u32 = Readable::read(reader)?;
7375                 }
7376
7377                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7378                         0 => None,
7379                         1 => Some(CounterpartyForwardingInfo {
7380                                 fee_base_msat: Readable::read(reader)?,
7381                                 fee_proportional_millionths: Readable::read(reader)?,
7382                                 cltv_expiry_delta: Readable::read(reader)?,
7383                         }),
7384                         _ => return Err(DecodeError::InvalidValue),
7385                 };
7386
7387                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7388                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7389
7390                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7391
7392                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7393                 let counterparty_node_id = Readable::read(reader)?;
7394
7395                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7396                 let commitment_secrets = Readable::read(reader)?;
7397
7398                 let channel_update_status = Readable::read(reader)?;
7399
7400                 #[cfg(any(test, fuzzing))]
7401                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7402                 #[cfg(any(test, fuzzing))]
7403                 {
7404                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7405                         for _ in 0..htlc_fulfills_len {
7406                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7407                         }
7408                 }
7409
7410                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7411                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7412                                 FeeUpdateState::Outbound
7413                         } else {
7414                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7415                         }))
7416                 } else {
7417                         None
7418                 };
7419
7420                 let mut announcement_sigs = None;
7421                 let mut target_closing_feerate_sats_per_kw = None;
7422                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7423                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7424                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7425                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7426                 // only, so we default to that if none was written.
7427                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7428                 let mut channel_creation_height = Some(serialized_height);
7429                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7430
7431                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7432                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7433                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7434                 let mut latest_inbound_scid_alias = None;
7435                 let mut outbound_scid_alias = None;
7436                 let mut channel_pending_event_emitted = None;
7437                 let mut channel_ready_event_emitted = None;
7438
7439                 let mut user_id_high_opt: Option<u64> = None;
7440                 let mut channel_keys_id: Option<[u8; 32]> = None;
7441                 let mut temporary_channel_id: Option<ChannelId> = None;
7442                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7443
7444                 let mut blocked_monitor_updates = Some(Vec::new());
7445
7446                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7447                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7448
7449                 let mut is_batch_funding: Option<()> = None;
7450
7451                 read_tlv_fields!(reader, {
7452                         (0, announcement_sigs, option),
7453                         (1, minimum_depth, option),
7454                         (2, channel_type, option),
7455                         (3, counterparty_selected_channel_reserve_satoshis, option),
7456                         (4, holder_selected_channel_reserve_satoshis, option),
7457                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7458                         (6, holder_max_htlc_value_in_flight_msat, option),
7459                         (7, shutdown_scriptpubkey, option),
7460                         (8, blocked_monitor_updates, optional_vec),
7461                         (9, target_closing_feerate_sats_per_kw, option),
7462                         (11, monitor_pending_finalized_fulfills, optional_vec),
7463                         (13, channel_creation_height, option),
7464                         (15, preimages_opt, optional_vec),
7465                         (17, announcement_sigs_state, option),
7466                         (19, latest_inbound_scid_alias, option),
7467                         (21, outbound_scid_alias, option),
7468                         (23, channel_ready_event_emitted, option),
7469                         (25, user_id_high_opt, option),
7470                         (27, channel_keys_id, option),
7471                         (28, holder_max_accepted_htlcs, option),
7472                         (29, temporary_channel_id, option),
7473                         (31, channel_pending_event_emitted, option),
7474                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
7475                         (37, holding_cell_skimmed_fees_opt, optional_vec),
7476                         (38, is_batch_funding, option),
7477                 });
7478
7479                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7480                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7481                         // If we've gotten to the funding stage of the channel, populate the signer with its
7482                         // required channel parameters.
7483                         let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7484                         if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7485                                 holder_signer.provide_channel_parameters(&channel_parameters);
7486                         }
7487                         (channel_keys_id, holder_signer)
7488                 } else {
7489                         // `keys_data` can be `None` if we had corrupted data.
7490                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7491                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7492                         (holder_signer.channel_keys_id(), holder_signer)
7493                 };
7494
7495                 if let Some(preimages) = preimages_opt {
7496                         let mut iter = preimages.into_iter();
7497                         for htlc in pending_outbound_htlcs.iter_mut() {
7498                                 match &htlc.state {
7499                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7500                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7501                                         }
7502                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7503                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7504                                         }
7505                                         _ => {}
7506                                 }
7507                         }
7508                         // We expect all preimages to be consumed above
7509                         if iter.next().is_some() {
7510                                 return Err(DecodeError::InvalidValue);
7511                         }
7512                 }
7513
7514                 let chan_features = channel_type.as_ref().unwrap();
7515                 if !chan_features.is_subset(our_supported_features) {
7516                         // If the channel was written by a new version and negotiated with features we don't
7517                         // understand yet, refuse to read it.
7518                         return Err(DecodeError::UnknownRequiredFeature);
7519                 }
7520
7521                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7522                 // To account for that, we're proactively setting/overriding the field here.
7523                 channel_parameters.channel_type_features = chan_features.clone();
7524
7525                 let mut secp_ctx = Secp256k1::new();
7526                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7527
7528                 // `user_id` used to be a single u64 value. In order to remain backwards
7529                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7530                 // separate u64 values.
7531                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7532
7533                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7534
7535                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7536                         let mut iter = skimmed_fees.into_iter();
7537                         for htlc in pending_outbound_htlcs.iter_mut() {
7538                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7539                         }
7540                         // We expect all skimmed fees to be consumed above
7541                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7542                 }
7543                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7544                         let mut iter = skimmed_fees.into_iter();
7545                         for htlc in holding_cell_htlc_updates.iter_mut() {
7546                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7547                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7548                                 }
7549                         }
7550                         // We expect all skimmed fees to be consumed above
7551                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7552                 }
7553
7554                 Ok(Channel {
7555                         context: ChannelContext {
7556                                 user_id,
7557
7558                                 config: config.unwrap(),
7559
7560                                 prev_config: None,
7561
7562                                 // Note that we don't care about serializing handshake limits as we only ever serialize
7563                                 // channel data after the handshake has completed.
7564                                 inbound_handshake_limits_override: None,
7565
7566                                 channel_id,
7567                                 temporary_channel_id,
7568                                 channel_state,
7569                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
7570                                 secp_ctx,
7571                                 channel_value_satoshis,
7572
7573                                 latest_monitor_update_id,
7574
7575                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7576                                 shutdown_scriptpubkey,
7577                                 destination_script,
7578
7579                                 cur_holder_commitment_transaction_number,
7580                                 cur_counterparty_commitment_transaction_number,
7581                                 value_to_self_msat,
7582
7583                                 holder_max_accepted_htlcs,
7584                                 pending_inbound_htlcs,
7585                                 pending_outbound_htlcs,
7586                                 holding_cell_htlc_updates,
7587
7588                                 resend_order,
7589
7590                                 monitor_pending_channel_ready,
7591                                 monitor_pending_revoke_and_ack,
7592                                 monitor_pending_commitment_signed,
7593                                 monitor_pending_forwards,
7594                                 monitor_pending_failures,
7595                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7596
7597                                 pending_update_fee,
7598                                 holding_cell_update_fee,
7599                                 next_holder_htlc_id,
7600                                 next_counterparty_htlc_id,
7601                                 update_time_counter,
7602                                 feerate_per_kw,
7603
7604                                 #[cfg(debug_assertions)]
7605                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7606                                 #[cfg(debug_assertions)]
7607                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7608
7609                                 last_sent_closing_fee: None,
7610                                 pending_counterparty_closing_signed: None,
7611                                 closing_fee_limits: None,
7612                                 target_closing_feerate_sats_per_kw,
7613
7614                                 funding_tx_confirmed_in,
7615                                 funding_tx_confirmation_height,
7616                                 short_channel_id,
7617                                 channel_creation_height: channel_creation_height.unwrap(),
7618
7619                                 counterparty_dust_limit_satoshis,
7620                                 holder_dust_limit_satoshis,
7621                                 counterparty_max_htlc_value_in_flight_msat,
7622                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7623                                 counterparty_selected_channel_reserve_satoshis,
7624                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7625                                 counterparty_htlc_minimum_msat,
7626                                 holder_htlc_minimum_msat,
7627                                 counterparty_max_accepted_htlcs,
7628                                 minimum_depth,
7629
7630                                 counterparty_forwarding_info,
7631
7632                                 channel_transaction_parameters: channel_parameters,
7633                                 funding_transaction,
7634                                 is_batch_funding,
7635
7636                                 counterparty_cur_commitment_point,
7637                                 counterparty_prev_commitment_point,
7638                                 counterparty_node_id,
7639
7640                                 counterparty_shutdown_scriptpubkey,
7641
7642                                 commitment_secrets,
7643
7644                                 channel_update_status,
7645                                 closing_signed_in_flight: false,
7646
7647                                 announcement_sigs,
7648
7649                                 #[cfg(any(test, fuzzing))]
7650                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7651                                 #[cfg(any(test, fuzzing))]
7652                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7653
7654                                 workaround_lnd_bug_4006: None,
7655                                 sent_message_awaiting_response: None,
7656
7657                                 latest_inbound_scid_alias,
7658                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7659                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7660
7661                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7662                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7663
7664                                 #[cfg(any(test, fuzzing))]
7665                                 historical_inbound_htlc_fulfills,
7666
7667                                 channel_type: channel_type.unwrap(),
7668                                 channel_keys_id,
7669
7670                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7671                         }
7672                 })
7673         }
7674 }
7675
7676 #[cfg(test)]
7677 mod tests {
7678         use std::cmp;
7679         use bitcoin::blockdata::constants::ChainHash;
7680         use bitcoin::blockdata::script::{Script, Builder};
7681         use bitcoin::blockdata::transaction::{Transaction, TxOut};
7682         use bitcoin::blockdata::opcodes;
7683         use bitcoin::network::constants::Network;
7684         use hex;
7685         use crate::ln::PaymentHash;
7686         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7687         use crate::ln::channel::InitFeatures;
7688         use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7689         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7690         use crate::ln::features::ChannelTypeFeatures;
7691         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7692         use crate::ln::script::ShutdownScript;
7693         use crate::ln::chan_utils;
7694         use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7695         use crate::chain::BestBlock;
7696         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7697         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7698         use crate::chain::transaction::OutPoint;
7699         use crate::routing::router::Path;
7700         use crate::util::config::UserConfig;
7701         use crate::util::errors::APIError;
7702         use crate::util::test_utils;
7703         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7704         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7705         use bitcoin::secp256k1::ffi::Signature as FFISignature;
7706         use bitcoin::secp256k1::{SecretKey,PublicKey};
7707         use bitcoin::hashes::sha256::Hash as Sha256;
7708         use bitcoin::hashes::Hash;
7709         use bitcoin::hash_types::WPubkeyHash;
7710         use bitcoin::PackedLockTime;
7711         use bitcoin::util::address::WitnessVersion;
7712         use crate::prelude::*;
7713
	/// Test-only fee estimator that reports a single fixed feerate for every
	/// confirmation target.
	struct TestFeeEstimator {
		// Feerate returned by `get_est_sat_per_1000_weight`, in sat-per-1000-weight.
		fee_est: u32
	}
7717         impl FeeEstimator for TestFeeEstimator {
7718                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7719                         self.fee_est
7720                 }
7721         }
7722
7723         #[test]
7724         fn test_max_funding_satoshis_no_wumbo() {
7725                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7726                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7727                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7728         }
7729
7730         #[test]
7731         fn test_no_fee_check_overflow() {
7732                 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7733                 // arithmetic, causing a panic with debug assertions enabled.
7734                 let fee_est = TestFeeEstimator { fee_est: 42 };
7735                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7736                 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
7737                         &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
7738                         u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
7739         }
7740
	/// Test [`SignerProvider`]/[`EntropySource`] implementation backed by one fixed
	/// in-memory signer and all-zero "randomness".
	struct Keys {
		// The single signer handed out (cloned) for every channel.
		signer: InMemorySigner,
	}
7744
	impl EntropySource for Keys {
		// Deterministic "entropy" so tests are reproducible.
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	}
7748
7749         impl SignerProvider for Keys {
7750                 type Signer = InMemorySigner;
7751
7752                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7753                         self.signer.channel_keys_id()
7754                 }
7755
7756                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7757                         self.signer.clone()
7758                 }
7759
7760                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
7761
7762                 fn get_destination_script(&self) -> Result<Script, ()> {
7763                         let secp_ctx = Secp256k1::signing_only();
7764                         let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7765                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7766                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
7767                 }
7768
7769                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7770                         let secp_ctx = Secp256k1::signing_only();
7771                         let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7772                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7773                 }
7774         }
7775
7776         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7777         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7778                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
7779         }
7780
7781         #[test]
7782         fn upfront_shutdown_script_incompatibility() {
7783                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7784                 let non_v0_segwit_shutdown_script =
7785                         ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7786
7787                 let seed = [42; 32];
7788                 let network = Network::Testnet;
7789                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7790                 keys_provider.expect(OnGetShutdownScriptpubkey {
7791                         returns: non_v0_segwit_shutdown_script.clone(),
7792                 });
7793
7794                 let secp_ctx = Secp256k1::new();
7795                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7796                 let config = UserConfig::default();
7797                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7798                         Err(APIError::IncompatibleShutdownScript { script }) => {
7799                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7800                         },
7801                         Err(e) => panic!("Unexpected error: {:?}", e),
7802                         Ok(_) => panic!("Expected error"),
7803                 }
7804         }
7805
7806         // Check that, during channel creation, we use the same feerate in the open channel message
7807         // as we do in the Channel object creation itself.
7808         #[test]
7809         fn test_open_channel_msg_fee() {
7810                 let original_fee = 253;
7811                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7812                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7813                 let secp_ctx = Secp256k1::new();
7814                 let seed = [42; 32];
7815                 let network = Network::Testnet;
7816                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7817
7818                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7819                 let config = UserConfig::default();
7820                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7821
7822                 // Now change the fee so we can check that the fee in the open_channel message is the
7823                 // same as the old fee.
7824                 fee_est.fee_est = 500;
7825                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7826                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7827         }
7828
	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		// Force A's own dust limit above B's (546 sat) so the two sides disagree on
		// which HTLCs are dust.
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});

		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		// All dust: fee should match a commitment transaction with zero HTLC outputs.
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		// Two pending HTLCs pushed above plus the new candidate = 3 non-dust HTLC outputs
		// on the remote (B's) commitment, since B's dust limit (546 sat) is below them all.
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}
7913
	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Reference fees for commitments carrying zero and one (non-dust) HTLC outputs.
		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// Each amount below is (253 sat/kW * second-stage tx weight / 1000 + dust limit +/- 1)
		// satoshis, scaled to msat: one satoshi either side of the effective dust threshold.

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// Now repeat the checks from the remote commitment's perspective, where the
		// counterparty's dust limit applies instead of our own.
		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
7960
	#[test]
	fn channel_reestablish_no_updates() {
		// Open a channel between two nodes, disconnect them, and check that each side's
		// `channel_reestablish` message carries sane commitment numbers and a zeroed
		// last-per-commitment-secret when no updates have occurred.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
8016
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		// Sweep `max_inbound_htlc_value_in_flight_percent_of_channel` through in-range
		// (2%, 99%) and out-of-range (0%, 101%) values for both outbound and inbound
		// channel constructors, checking that out-of-range values are clamped to the
		// 1%..=100% bounds.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		// Reuse chan_1's open_channel message for all of the inbound-channel cases below.
		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound - 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
8089
8090         #[test]
8091         fn test_configured_holder_selected_channel_reserve_satoshis() {
8092
8093                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8094                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8095                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8096
8097                 // Test with valid but unreasonably high channel reserves
8098                 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8099                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8100                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8101
8102                 // Test with calculated channel reserve less than lower bound
8103                 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8104                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8105
8106                 // Test with invalid channel reserves since sum of both is greater than or equal
8107                 // to channel value
8108                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8109                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8110         }
8111
8112         fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8113                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8114                 let logger = test_utils::TestLogger::new();
8115                 let secp_ctx = Secp256k1::new();
8116                 let seed = [42; 32];
8117                 let network = Network::Testnet;
8118                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8119                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8120                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8121
8122
8123                 let mut outbound_node_config = UserConfig::default();
8124                 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8125                 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
8126
8127                 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8128                 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8129
8130                 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8131                 let mut inbound_node_config = UserConfig::default();
8132                 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8133
8134                 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8135                         let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8136
8137                         let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8138
8139                         assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8140                         assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8141                 } else {
8142                         // Channel Negotiations failed
8143                         let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8144                         assert!(result.is_err());
8145                 }
8146         }
8147
	#[test]
	// Walks a full A<->B handshake (open_channel / accept_channel / funding_created /
	// funding_signed), then verifies that feeding a counterparty `channel_update` message
	// into the channel (a) reports a change the first time, (b) records the counterparty's
	// forwarding parameters, (c) leaves our own holder_htlc_minimum_msat untouched, and
	// (d) reports no change when the identical update is applied a second time.
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		// NOTE: this shadows the earlier binding — B's handshake uses a different node id key.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		// Build a funding transaction whose single output pays to the 2-of-2 funding script.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Make sure that receiving a channel update will update the Channel as expected.
		// The signature is never checked here, so a dummy FFI signature is sufficient.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		// First application of the update must report that something changed.
		assert!(node_a_chan.channel_update(&update).unwrap());

		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		// The counterparty's forwarding parameters must now mirror the update's contents.
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		// Re-applying the identical update must report that nothing changed.
		assert!(!node_a_chan.channel_update(&update).unwrap());
	}
8220
8221         #[cfg(feature = "_test_vectors")]
8222         #[test]
8223         fn outbound_commitment_test() {
8224                 use bitcoin::util::sighash;
8225                 use bitcoin::consensus::encode::serialize;
8226                 use bitcoin::blockdata::transaction::EcdsaSighashType;
8227                 use bitcoin::hashes::hex::FromHex;
8228                 use bitcoin::hash_types::Txid;
8229                 use bitcoin::secp256k1::Message;
8230                 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
8231                 use crate::ln::PaymentPreimage;
8232                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8233                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8234                 use crate::util::logger::Logger;
8235                 use crate::sync::Arc;
8236
8237                 // Test vectors from BOLT 3 Appendices C and F (anchors):
8238                 let feeest = TestFeeEstimator{fee_est: 15000};
8239                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8240                 let secp_ctx = Secp256k1::new();
8241
8242                 let mut signer = InMemorySigner::new(
8243                         &secp_ctx,
8244                         SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8245                         SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8246                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8247                         SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8248                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8249
8250                         // These aren't set in the test vectors:
8251                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8252                         10_000_000,
8253                         [0; 32],
8254                         [0; 32],
8255                 );
8256
8257                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8258                                 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8259                 let keys_provider = Keys { signer: signer.clone() };
8260
8261                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8262                 let mut config = UserConfig::default();
8263                 config.channel_handshake_config.announced_channel = false;
8264                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8265                 chan.context.holder_dust_limit_satoshis = 546;
8266                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8267
8268                 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8269
8270                 let counterparty_pubkeys = ChannelPublicKeys {
8271                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8272                         revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8273                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8274                         delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8275                         htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8276                 };
8277                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8278                         CounterpartyChannelTransactionParameters {
8279                                 pubkeys: counterparty_pubkeys.clone(),
8280                                 selected_contest_delay: 144
8281                         });
8282                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8283                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8284
8285                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8286                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8287
8288                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8289                            hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8290
8291                 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8292                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8293
8294                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8295                 // derived from a commitment_seed, so instead we copy it here and call
8296                 // build_commitment_transaction.
8297                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8298                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8299                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8300                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8301                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8302
8303                 macro_rules! test_commitment {
8304                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8305                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8306                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8307                         };
8308                 }
8309
8310                 macro_rules! test_commitment_with_anchors {
8311                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8312                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8313                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8314                         };
8315                 }
8316
8317                 macro_rules! test_commitment_common {
8318                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8319                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8320                         } ) => { {
8321                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8322                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8323
8324                                         let htlcs = commitment_stats.htlcs_included.drain(..)
8325                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8326                                                 .collect();
8327                                         (commitment_stats.tx, htlcs)
8328                                 };
8329                                 let trusted_tx = commitment_tx.trust();
8330                                 let unsigned_tx = trusted_tx.built_transaction();
8331                                 let redeemscript = chan.context.get_funding_redeemscript();
8332                                 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8333                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8334                                 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8335                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8336
8337                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8338                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8339                                 let mut counterparty_htlc_sigs = Vec::new();
8340                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8341                                 $({
8342                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8343                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8344                                         counterparty_htlc_sigs.push(remote_signature);
8345                                 })*
8346                                 assert_eq!(htlcs.len(), per_htlc.len());
8347
8348                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
8349                                         commitment_tx.clone(),
8350                                         counterparty_signature,
8351                                         counterparty_htlc_sigs,
8352                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8353                                         chan.context.counterparty_funding_pubkey()
8354                                 );
8355                                 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8356                                 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8357
8358                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
8359                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8360                                 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8361
8362                                 // ((htlc, counterparty_sig), (index, holder_sig))
8363                                 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8364
8365                                 $({
8366                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
8367                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8368
8369                                         let ref htlc = htlcs[$htlc_idx];
8370                                         let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8371                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8372                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8373                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8374                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8375                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8376                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8377
8378                                         let mut preimage: Option<PaymentPreimage> = None;
8379                                         if !htlc.offered {
8380                                                 for i in 0..5 {
8381                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8382                                                         if out == htlc.payment_hash {
8383                                                                 preimage = Some(PaymentPreimage([i; 32]));
8384                                                         }
8385                                                 }
8386
8387                                                 assert!(preimage.is_some());
8388                                         }
8389
8390                                         let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8391                                         let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8392                                                 channel_derivation_parameters: ChannelDerivationParameters {
8393                                                         value_satoshis: chan.context.channel_value_satoshis,
8394                                                         keys_id: chan.context.channel_keys_id,
8395                                                         transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8396                                                 },
8397                                                 commitment_txid: trusted_tx.txid(),
8398                                                 per_commitment_number: trusted_tx.commitment_number(),
8399                                                 per_commitment_point: trusted_tx.per_commitment_point(),
8400                                                 feerate_per_kw: trusted_tx.feerate_per_kw(),
8401                                                 htlc: htlc.clone(),
8402                                                 preimage: preimage.clone(),
8403                                                 counterparty_sig: *htlc_counterparty_sig,
8404                                         }, &secp_ctx).unwrap();
8405                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8406                                         assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8407
8408                                         let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8409                                         assert_eq!(signature, htlc_holder_sig, "htlc sig");
8410                                         let trusted_tx = holder_commitment_tx.trust();
8411                                         htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8412                                         log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8413                                         assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8414                                 })*
8415                                 assert!(htlc_counterparty_sig_iter.next().is_none());
8416                         } }
8417                 }
8418
8419                 // anchors: simple commitment tx with no HTLCs and single anchor
8420                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8421                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8422                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8423
8424                 // simple commitment tx with no HTLCs
8425                 chan.context.value_to_self_msat = 7000000000;
8426
8427                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8428                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8429                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8430
8431                 // anchors: simple commitment tx with no HTLCs
8432                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8433                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8434                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8435
8436                 chan.context.pending_inbound_htlcs.push({
8437                         let mut out = InboundHTLCOutput{
8438                                 htlc_id: 0,
8439                                 amount_msat: 1000000,
8440                                 cltv_expiry: 500,
8441                                 payment_hash: PaymentHash([0; 32]),
8442                                 state: InboundHTLCState::Committed,
8443                         };
8444                         out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8445                         out
8446                 });
8447                 chan.context.pending_inbound_htlcs.push({
8448                         let mut out = InboundHTLCOutput{
8449                                 htlc_id: 1,
8450                                 amount_msat: 2000000,
8451                                 cltv_expiry: 501,
8452                                 payment_hash: PaymentHash([0; 32]),
8453                                 state: InboundHTLCState::Committed,
8454                         };
8455                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8456                         out
8457                 });
8458                 chan.context.pending_outbound_htlcs.push({
8459                         let mut out = OutboundHTLCOutput{
8460                                 htlc_id: 2,
8461                                 amount_msat: 2000000,
8462                                 cltv_expiry: 502,
8463                                 payment_hash: PaymentHash([0; 32]),
8464                                 state: OutboundHTLCState::Committed,
8465                                 source: HTLCSource::dummy(),
8466                                 skimmed_fee_msat: None,
8467                         };
8468                         out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8469                         out
8470                 });
8471                 chan.context.pending_outbound_htlcs.push({
8472                         let mut out = OutboundHTLCOutput{
8473                                 htlc_id: 3,
8474                                 amount_msat: 3000000,
8475                                 cltv_expiry: 503,
8476                                 payment_hash: PaymentHash([0; 32]),
8477                                 state: OutboundHTLCState::Committed,
8478                                 source: HTLCSource::dummy(),
8479                                 skimmed_fee_msat: None,
8480                         };
8481                         out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8482                         out
8483                 });
		// Received (inbound) HTLC: 4,000,000 msat, CLTV expiry 504, committed.
		// Its payment hash is SHA256 of the preimage 0x04 repeated 32 times,
		// matching the hash-lock scripts embedded in the HTLC tx hex below.
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
			out
		});
8495
		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0; // at zero feerate no HTLC output is trimmed to dust

		// NOTE(review): test_commitment! is defined elsewhere in this file — as used
		// here it takes two commitment signatures, the expected fully-signed
		// commitment tx hex, then one { output index, two HTLC signatures, expected
		// signed HTLC tx hex } entry per HTLC transaction; confirm the exact
		// holder/counterparty signature ordering at the macro definition.
		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
		                 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
		                  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

		                  { 1,
		                  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
		                  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		                  { 2,
		                  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
		                  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 3,
		                  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
		                  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		                  { 4,
		                  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
		                  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
8529
		// commitment tx with seven outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		// 647 sat/kw is the highest feerate at which all seven outputs
		// (presumably to_local, to_remote and the five pending HTLCs) survive;
		// the next fixture bumps to 648 and one HTLC output gets trimmed.
		chan.context.feerate_per_kw = 647;

		test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
		                 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
		                  "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

		                  { 1,
		                  "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
		                  "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		                  { 2,
		                  "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
		                  "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 3,
		                  "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
		                  "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		                  { 4,
		                  "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
		                  "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
8563
		// commitment tx with six outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		// At 648 sat/kw one HTLC output falls below dust and is trimmed: only
		// six commitment outputs remain and just four HTLC transactions
		// (entries 0-3 below) are expected.
		chan.context.feerate_per_kw = 648;

		test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
		                 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
		                  "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		                  { 1,
		                  "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
		                  "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 2,
		                  "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
		                  "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		                  { 3,
		                  "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
		                  "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
8592
		// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 645;
		// Raise the holder dust limit so that, with anchors, exactly one HTLC
		// output is trimmed — only four HTLC transactions (entries 0-3) remain.
		// Later fixtures in this function reset this value.
		chan.context.holder_dust_limit_satoshis = 1001;

		// NOTE(review): test_commitment_with_anchors! (defined elsewhere in this
		// file) takes the same argument shape as test_commitment! but builds the
		// anchor-output variant of the commitment; confirm at the macro definition.
		test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
		                 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
		                  "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

		                  { 1,
		                  "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
		                  "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		                  { 2,
		                  "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
		                  "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

		                  { 3,
		                  "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
		                  "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
		} );
8622
8623                 // commitment tx with six outputs untrimmed (maximum feerate)
8624                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8625                 chan.context.feerate_per_kw = 2069;
8626                 chan.context.holder_dust_limit_satoshis = 546;
8627
8628                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8629                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8630                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8631
8632                                   { 0,
8633                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8634                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8635                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8636
8637                                   { 1,
8638                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8639                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8640                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8641
8642                                   { 2,
8643                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8644                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8645                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8646
8647                                   { 3,
8648                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8649                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8650                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8651                 } );
8652
8653                 // commitment tx with five outputs untrimmed (minimum feerate)
                     // One sat/kW above 2069 (the six-output maximum): the extra fee trims one
                     // output, so this vector expects a five-output commitment transaction.
8654                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8655                 chan.context.feerate_per_kw = 2070;
8656
8657                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8658                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8659                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8660
8661                                   { 0,
8662                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8663                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8664                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8665
8666                                   { 1,
8667                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8668                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8669                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8670
8671                                   { 2,
8672                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8673                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8674                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8675                 } );
8676
8677                 // commitment tx with five outputs untrimmed (maximum feerate)
                     // Upper edge of the five-output regime: 2194 sat/kW is the last feerate
                     // before another output is trimmed (the following vector uses 2195).
8678                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8679                 chan.context.feerate_per_kw = 2194;
8680
8681                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8682                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8683                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8684
8685                                   { 0,
8686                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8687                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8688                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8689
8690                                   { 1,
8691                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8692                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8693                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8694
8695                                   { 2,
8696                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8697                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8698                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8699                 } );
8700
8701                 // commitment tx with four outputs untrimmed (minimum feerate)
                     // 2195 sat/kW: one past the five-output maximum, dropping the commitment
                     // transaction to four outputs.
8702                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8703                 chan.context.feerate_per_kw = 2195;
8704
8705                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8706                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8707                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8708
8709                                   { 0,
8710                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8711                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8712                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8713
8714                                   { 1,
8715                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8716                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8717                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8718                 } );
8719
8720                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
                     // Anchors variant: per the label, this vector exercises trimming via the
                     // dust limit (raised to 2001 sat) rather than via the feerate.
8721                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8722                 chan.context.feerate_per_kw = 2185;
8723                 chan.context.holder_dust_limit_satoshis = 2001;
                     // Stash the current (non-anchors) channel type; it is restored later via
                     // `cached_channel_type.clone()` after the anchors vector runs.
                     // NOTE(review): this reads as a move out of the field, which only compiles
                     // because the field is re-assigned on the very next line — upstream takes
                     // this by `.clone()`; confirm the `.clone()` was not lost in transcription.
8724                 let cached_channel_type = chan.context.channel_type;
8725                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8726
8727                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8728                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8729                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8730
8731                                   { 0,
8732                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8733                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8734                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8735
8736                                   { 1,
8737                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8738                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8739                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8740                 } );
8741
8742                 // commitment tx with four outputs untrimmed (maximum feerate)
                     // Back to the non-anchors configuration: restore the 546-sat dust limit
                     // used by the earlier vectors and the channel type stashed in
                     // `cached_channel_type`, then test the top of the four-output feerate range.
8743                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8744                 chan.context.feerate_per_kw = 3702;
8745                 chan.context.holder_dust_limit_satoshis = 546;
8746                 chan.context.channel_type = cached_channel_type.clone();
8747
8748                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8749                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8750                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8751
8752                                   { 0,
8753                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8754                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8755                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8756
8757                                   { 1,
8758                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8759                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8760                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8761                 } );
8762
8763                 // commitment tx with three outputs untrimmed (minimum feerate)
                     // 3703 sat/kW: one past the four-output maximum (3702), trimming the
                     // commitment transaction down to three outputs.
8764                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8765                 chan.context.feerate_per_kw = 3703;
8766
8767                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8768                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8769                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8770
8771                                   { 0,
8772                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8773                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8774                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8775                 } );
8776
8777                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
                     // Anchors variant of the three-output case: per the label, the 3001-sat
                     // dust limit (not the 3687 sat/kW feerate) is what drives trimming here.
8778                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8779                 chan.context.feerate_per_kw = 3687;
8780                 chan.context.holder_dust_limit_satoshis = 3001;
8781                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8782
8783                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8784                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8785                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8786
8787                                   { 0,
8788                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8789                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8790                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8791                 } );
8792
8793                 // commitment tx with three outputs untrimmed (maximum feerate)
8794                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8795                 chan.context.feerate_per_kw = 4914;
8796                 chan.context.holder_dust_limit_satoshis = 546;
8797                 chan.context.channel_type = cached_channel_type.clone();
8798
8799                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8800                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8801                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8802
8803                                   { 0,
8804                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8805                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8806                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8807                 } );
8808
8809                 // commitment tx with two outputs untrimmed (minimum feerate)
8810                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8811                 chan.context.feerate_per_kw = 4915;
8812                 chan.context.holder_dust_limit_satoshis = 546;
8813
8814                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8815                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8816                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8817
8818                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8819                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8820                 chan.context.feerate_per_kw = 4894;
8821                 chan.context.holder_dust_limit_satoshis = 4001;
8822                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8823
8824                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8825                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8826                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8827
8828                 // commitment tx with two outputs untrimmed (maximum feerate)
8829                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8830                 chan.context.feerate_per_kw = 9651180;
8831                 chan.context.holder_dust_limit_satoshis = 546;
8832                 chan.context.channel_type = cached_channel_type.clone();
8833
8834                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8835                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8836                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8837
8838                 // commitment tx with one output untrimmed (minimum feerate)
8839                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8840                 chan.context.feerate_per_kw = 9651181;
8841
8842                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8843                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8844                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8845
8846                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8847                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8848                 chan.context.feerate_per_kw = 6216010;
8849                 chan.context.holder_dust_limit_satoshis = 4001;
8850                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8851
8852                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8853                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8854                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8855
8856                 // commitment tx with fee greater than funder amount
8857                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8858                 chan.context.feerate_per_kw = 9651936;
8859                 chan.context.holder_dust_limit_satoshis = 546;
8860                 chan.context.channel_type = cached_channel_type;
8861
8862                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8863                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8864                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8865
8866                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8867                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8868                 chan.context.feerate_per_kw = 253;
8869                 chan.context.pending_inbound_htlcs.clear();
8870                 chan.context.pending_inbound_htlcs.push({
8871                         let mut out = InboundHTLCOutput{
8872                                 htlc_id: 1,
8873                                 amount_msat: 2000000,
8874                                 cltv_expiry: 501,
8875                                 payment_hash: PaymentHash([0; 32]),
8876                                 state: InboundHTLCState::Committed,
8877                         };
8878                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8879                         out
8880                 });
8881                 chan.context.pending_outbound_htlcs.clear();
8882                 chan.context.pending_outbound_htlcs.push({
8883                         let mut out = OutboundHTLCOutput{
8884                                 htlc_id: 6,
8885                                 amount_msat: 5000001,
8886                                 cltv_expiry: 506,
8887                                 payment_hash: PaymentHash([0; 32]),
8888                                 state: OutboundHTLCState::Committed,
8889                                 source: HTLCSource::dummy(),
8890                                 skimmed_fee_msat: None,
8891                         };
8892                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8893                         out
8894                 });
8895                 chan.context.pending_outbound_htlcs.push({
8896                         let mut out = OutboundHTLCOutput{
8897                                 htlc_id: 5,
8898                                 amount_msat: 5000000,
8899                                 cltv_expiry: 505,
8900                                 payment_hash: PaymentHash([0; 32]),
8901                                 state: OutboundHTLCState::Committed,
8902                                 source: HTLCSource::dummy(),
8903                                 skimmed_fee_msat: None,
8904                         };
8905                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8906                         out
8907                 });
8908
8909                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8910                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8911                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8912
8913                                   { 0,
8914                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8915                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8916                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8917                                   { 1,
8918                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8919                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8920                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8921                                   { 2,
8922                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8923                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8924                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8925                 } );
8926
8927                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8928                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8929                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8930                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8931
8932                                   { 0,
8933                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8934                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8935                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8936                                   { 1,
8937                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8938                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8939                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8940                                   { 2,
8941                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8942                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8943                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8944                 } );
8945         }
8946
8947         #[test]
8948         fn test_per_commitment_secret_gen() {
8949                 // Test vectors from BOLT 3 Appendix D:
8950
8951                 let mut seed = [0; 32];
8952                 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8953                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8954                            hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
8955
8956                 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8957                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8958                            hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
8959
8960                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8961                            hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8962
8963                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8964                            hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
8965
8966                 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8967                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8968                            hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8969         }
8970
8971         #[test]
8972         fn test_key_derivation() {
8973                 // Test vectors from BOLT 3 Appendix E:
8974                 let secp_ctx = Secp256k1::new();
8975
8976                 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8977                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8978
8979                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8980                 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8981
8982                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8983                 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
8984
8985                 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8986                                 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
8987
8988                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8989                                 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
8990
8991                 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8992                                 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
8993
8994                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8995                                 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8996         }
8997
8998         #[test]
8999         fn test_zero_conf_channel_type_support() {
9000                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9001                 let secp_ctx = Secp256k1::new();
9002                 let seed = [42; 32];
9003                 let network = Network::Testnet;
9004                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9005                 let logger = test_utils::TestLogger::new();
9006
9007                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9008                 let config = UserConfig::default();
9009                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9010                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
9011
9012                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9013                 channel_type_features.set_zero_conf_required();
9014
9015                 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9016                 open_channel_msg.channel_type = Some(channel_type_features);
9017                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9018                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9019                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9020                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9021                 assert!(res.is_ok());
9022         }
9023
9024         #[test]
9025         fn test_supports_anchors_zero_htlc_tx_fee() {
9026                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9027                 // resulting `channel_type`.
9028                 let secp_ctx = Secp256k1::new();
9029                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9030                 let network = Network::Testnet;
9031                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9032                 let logger = test_utils::TestLogger::new();
9033
9034                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9035                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9036
9037                 let mut config = UserConfig::default();
9038                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9039
9040                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9041                 // need to signal it.
9042                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9043                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9044                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9045                         &config, 0, 42
9046                 ).unwrap();
9047                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9048
9049                 let mut expected_channel_type = ChannelTypeFeatures::empty();
9050                 expected_channel_type.set_static_remote_key_required();
9051                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9052
9053                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9054                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9055                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9056                 ).unwrap();
9057
9058                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9059                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9060                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9061                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9062                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9063                 ).unwrap();
9064
9065                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9066                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9067         }
9068
9069         #[test]
9070         fn test_rejects_implicit_simple_anchors() {
9071                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9072                 // each side's `InitFeatures`, it is rejected.
9073                 let secp_ctx = Secp256k1::new();
9074                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9075                 let network = Network::Testnet;
9076                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9077                 let logger = test_utils::TestLogger::new();
9078
9079                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9080                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9081
9082                 let config = UserConfig::default();
9083
9084                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9085                 let static_remote_key_required: u64 = 1 << 12;
9086                 let simple_anchors_required: u64 = 1 << 20;
9087                 let raw_init_features = static_remote_key_required | simple_anchors_required;
9088                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9089
9090                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9091                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9092                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9093                 ).unwrap();
9094
9095                 // Set `channel_type` to `None` to force the implicit feature negotiation.
9096                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9097                 open_channel_msg.channel_type = None;
9098
9099                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9100                 // `static_remote_key`, it will fail the channel.
9101                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9102                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9103                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9104                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9105                 );
9106                 assert!(channel_b.is_err());
9107         }
9108
9109         #[test]
9110         fn test_rejects_simple_anchors_channel_type() {
9111                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9112                 // it is rejected.
9113                 let secp_ctx = Secp256k1::new();
9114                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9115                 let network = Network::Testnet;
9116                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9117                 let logger = test_utils::TestLogger::new();
9118
9119                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9120                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9121
9122                 let config = UserConfig::default();
9123
9124                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9125                 let static_remote_key_required: u64 = 1 << 12;
9126                 let simple_anchors_required: u64 = 1 << 20;
9127                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9128                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9129                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9130                 assert!(!simple_anchors_init.requires_unknown_bits());
9131                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9132
9133                 // First, we'll try to open a channel between A and B where A requests a channel type for
9134                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9135                 // B as it's not supported by LDK.
9136                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9137                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9138                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9139                 ).unwrap();
9140
9141                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9142                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9143
9144                 let res = InboundV1Channel::<&TestKeysInterface>::new(
9145                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9146                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9147                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9148                 );
9149                 assert!(res.is_err());
9150
9151                 // Then, we'll try to open another channel where A requests a channel type for
9152                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9153                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9154                 // LDK.
9155                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9156                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9157                         10000000, 100000, 42, &config, 0, 42
9158                 ).unwrap();
9159
9160                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9161
9162                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9163                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9164                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9165                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9166                 ).unwrap();
9167
9168                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9169                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9170
9171                 let res = channel_a.accept_channel(
9172                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9173                 );
9174                 assert!(res.is_err());
9175         }
9176
9177         #[test]
9178         fn test_waiting_for_batch() {
9179                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9180                 let logger = test_utils::TestLogger::new();
9181                 let secp_ctx = Secp256k1::new();
9182                 let seed = [42; 32];
9183                 let network = Network::Testnet;
9184                 let best_block = BestBlock::from_network(network);
9185                 let chain_hash = ChainHash::using_genesis_block(network);
9186                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9187
9188                 let mut config = UserConfig::default();
9189                 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9190                 // channel in a batch before all channels are ready.
9191                 config.channel_handshake_limits.trust_own_funding_0conf = true;
9192
9193                 // Create a channel from node a to node b that will be part of batch funding.
9194                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9195                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9196                         &feeest,
9197                         &&keys_provider,
9198                         &&keys_provider,
9199                         node_b_node_id,
9200                         &channelmanager::provided_init_features(&config),
9201                         10000000,
9202                         100000,
9203                         42,
9204                         &config,
9205                         0,
9206                         42,
9207                 ).unwrap();
9208
9209                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9210                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9211                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9212                         &feeest,
9213                         &&keys_provider,
9214                         &&keys_provider,
9215                         node_b_node_id,
9216                         &channelmanager::provided_channel_type_features(&config),
9217                         &channelmanager::provided_init_features(&config),
9218                         &open_channel_msg,
9219                         7,
9220                         &config,
9221                         0,
9222                         &&logger,
9223                         true,  // Allow node b to send a 0conf channel_ready.
9224                 ).unwrap();
9225
9226                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9227                 node_a_chan.accept_channel(
9228                         &accept_channel_msg,
9229                         &config.channel_handshake_limits,
9230                         &channelmanager::provided_init_features(&config),
9231                 ).unwrap();
9232
9233                 // Fund the channel with a batch funding transaction.
9234                 let output_script = node_a_chan.context.get_funding_redeemscript();
9235                 let tx = Transaction {
9236                         version: 1,
9237                         lock_time: PackedLockTime::ZERO,
9238                         input: Vec::new(),
9239                         output: vec![
9240                                 TxOut {
9241                                         value: 10000000, script_pubkey: output_script.clone(),
9242                                 },
9243                                 TxOut {
9244                                         value: 10000000, script_pubkey: Builder::new().into_script(),
9245                                 },
9246                         ]};
9247                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9248                 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9249                         tx.clone(),
9250                         funding_outpoint,
9251                         true,
9252                         &&logger,
9253                 ).map_err(|_| ()).unwrap();
9254                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9255                         &funding_created_msg,
9256                         best_block,
9257                         &&keys_provider,
9258                         &&logger,
9259                 ).map_err(|_| ()).unwrap();
9260                 let node_b_updates = node_b_chan.monitor_updating_restored(
9261                         &&logger,
9262                         &&keys_provider,
9263                         chain_hash,
9264                         &config,
9265                         0,
9266                 );
9267
9268                 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9269                 // broadcasting the funding transaction until the batch is ready.
9270                 let _ = node_a_chan.funding_signed(
9271                         &funding_signed_msg,
9272                         best_block,
9273                         &&keys_provider,
9274                         &&logger,
9275                 ).unwrap();
9276                 let node_a_updates = node_a_chan.monitor_updating_restored(
9277                         &&logger,
9278                         &&keys_provider,
9279                         chain_hash,
9280                         &config,
9281                         0,
9282                 );
9283                 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9284                 // as the funding transaction depends on all channels in the batch becoming ready.
9285                 assert!(node_a_updates.channel_ready.is_none());
9286                 assert!(node_a_updates.funding_broadcastable.is_none());
9287                 assert_eq!(
9288                         node_a_chan.context.channel_state,
9289                         ChannelState::FundingSent as u32 |
9290                         ChannelState::WaitingForBatch as u32,
9291                 );
9292
9293                 // It is possible to receive a 0conf channel_ready from the remote node.
9294                 node_a_chan.channel_ready(
9295                         &node_b_updates.channel_ready.unwrap(),
9296                         &&keys_provider,
9297                         chain_hash,
9298                         &config,
9299                         &best_block,
9300                         &&logger,
9301                 ).unwrap();
9302                 assert_eq!(
9303                         node_a_chan.context.channel_state,
9304                         ChannelState::FundingSent as u32 |
9305                         ChannelState::WaitingForBatch as u32 |
9306                         ChannelState::TheirChannelReady as u32,
9307                 );
9308
9309                 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9310                 node_a_chan.set_batch_ready();
9311                 assert_eq!(
9312                         node_a_chan.context.channel_state,
9313                         ChannelState::FundingSent as u32 |
9314                         ChannelState::TheirChannelReady as u32,
9315                 );
9316                 assert!(node_a_chan.check_get_channel_ready(0).is_some());
9317         }
9318 }