Revert "Remove AvailableBalances::balance_msat"
[rust-lightning] / lightning / src / ln / channel.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 use bitcoin::blockdata::script::{Script,Builder};
11 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
12 use bitcoin::util::sighash;
13 use bitcoin::consensus::encode;
14
15 use bitcoin::hashes::Hash;
16 use bitcoin::hashes::sha256::Hash as Sha256;
17 use bitcoin::hashes::sha256d::Hash as Sha256d;
18 use bitcoin::hash_types::{Txid, BlockHash};
19
20 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
21 use bitcoin::secp256k1::{PublicKey,SecretKey};
22 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
23 use bitcoin::secp256k1;
24
25 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
26 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
27 use crate::ln::msgs;
28 use crate::ln::msgs::DecodeError;
29 use crate::ln::script::{self, ShutdownScript};
30 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
31 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
32 use crate::ln::chan_utils;
33 use crate::ln::onion_utils::HTLCFailReason;
34 use crate::chain::BestBlock;
35 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
36 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
37 use crate::chain::transaction::{OutPoint, TransactionData};
38 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
39 use crate::events::ClosureReason;
40 use crate::routing::gossip::NodeId;
41 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
42 use crate::util::logger::Logger;
43 use crate::util::errors::APIError;
44 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
45 use crate::util::scid_utils::scid_from_parts;
46
47 use crate::io;
48 use crate::prelude::*;
49 use core::{cmp,mem,fmt};
50 use core::ops::Deref;
51 #[cfg(any(test, fuzzing, debug_assertions))]
52 use crate::sync::Mutex;
53 use bitcoin::hashes::hex::ToHex;
54 use crate::sign::type_resolver::ChannelSignerType;
55
/// A snapshot of a channel's balance and limit values, exposed only to the test suite.
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	/// Total value of outbound HTLC updates currently queued in the holding cell.
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
67
/// The set of balances currently available on a channel, in several senses: the value we would
/// receive on close, the capacities available for receiving and sending, and the value limits on
/// the next outbound HTLC.
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
80
/// The lifecycle state of an `update_fee`, mirroring [`InboundHTLCState`] for inbound fee
/// updates (outbound updates collapse to a single variant, see below).
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
95
/// The reason an inbound HTLC is being removed, carried through the removal state machine (see
/// [`InboundHTLCState::LocalRemoved`]).
enum InboundHTLCRemovalReason {
	/// We are failing the HTLC back with an onion-encrypted error packet to be relayed upstream.
	FailRelay(msgs::OnionErrorPacket),
	/// We are failing the HTLC back as malformed, with a 32-byte hash and a failure code.
	FailMalformed(([u8; 32], u16)),
	/// We are fulfilling the HTLC with the given payment preimage.
	Fulfill(PaymentPreimage),
}
101
/// The per-HTLC state machine for an HTLC our counterparty offered to us.
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	/// The remote side has revoked the state which added this HTLC (see
	/// `AwaitingAnnouncedRemoteRevoke`); it is now an irrevocable part of the channel state.
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
155
/// An HTLC our counterparty offered to us, along with its current state in the commitment
/// state machine.
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	/// Where this HTLC currently sits in the [`InboundHTLCState`] state machine.
	state: InboundHTLCState,
}
163
/// The per-HTLC state machine for an HTLC we offered to our counterparty.
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	/// The remote side revoked the state which included this HTLC (see `LocalAnnounced` for the
	/// promotion rules); the HTLC remains here until the remote removes it.
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
195
/// The final outcome of an outbound HTLC once our counterparty removes it: either a success
/// (fulfillment) or a failure.
#[derive(Clone)]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	/// The HTLC was failed back to us with the given reason.
	Failure(HTLCFailReason),
}
202
203 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
204         fn from(o: Option<HTLCFailReason>) -> Self {
205                 match o {
206                         None => OutboundHTLCOutcome::Success(None),
207                         Some(r) => OutboundHTLCOutcome::Failure(r)
208                 }
209         }
210 }
211
212 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
213         fn into(self) -> Option<&'a HTLCFailReason> {
214                 match self {
215                         OutboundHTLCOutcome::Success(_) => None,
216                         OutboundHTLCOutcome::Failure(ref r) => Some(r)
217                 }
218         }
219 }
220
/// An HTLC we offered to our counterparty, along with its current state and the information
/// needed to fail or claim it backwards.
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	/// Where this HTLC currently sits in the [`OutboundHTLCState`] state machine.
	state: OutboundHTLCState,
	/// Where this HTLC came from, so we know who to fail/claim back to.
	source: HTLCSource,
	/// The extra fee we're skimming off the top of this HTLC, if any.
	skimmed_fee_msat: Option<u64>,
}
230
/// An update queued in the holding cell, to be sent to our counterparty once we are no longer
/// `AwaitingRemoteRevoke` (see that `ChannelState` flag for more info).
enum HTLCUpdateAwaitingACK {
	/// A new outbound HTLC to add, i.e. a pending `update_add_htlc`.
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	/// A claim of an inbound HTLC, i.e. a pending `update_fulfill_htlc`.
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	/// A failure of an inbound HTLC, i.e. a pending `update_fail_htlc`.
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
252
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
///
/// All discriminants are distinct powers of two so that flags can be OR'd onto base states;
/// they are written uniformly as `1 << n` to make the bit assignment obvious.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 1 << 2,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 1 << 3,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	ChannelReady = 1 << 6,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 1 << 12,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
/// Mask of the two shutdown flags; when both are set, both sides have sent a `shutdown` message.
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
/// Flags which may be set on more than one of the base channel states.
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
/// The union of all the flag bits which can decorate a funded channel state (presumably used to
/// mask flags off a state value, leaving the base state — confirm at the use sites).
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;

/// The highest possible commitment transaction number (2^48 - 1), used as the starting value;
/// per the BOLTs, commitment numbers are 48 bits.
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// The default maximum number of pending HTLCs we will allow on a channel.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
327
328 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
329         const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
330         const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
331         if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
332 }
333
// The incremental commitment transaction weight contributed by each (non-dust) HTLC output.
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
// Exposed publicly in test builds so tests can compute expected commitment tx weights.
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

/// The value, in satoshis, of each anchor output on an anchors commitment transaction.
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
376
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	/// An error which should be ignored.
	Ignore(String),
	/// An error which should result in a warning message to our peer.
	Warn(String),
	/// An error which should result in closing the channel.
	Close(String),
}
385
386 impl fmt::Debug for ChannelError {
387         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
388                 match self {
389                         &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
390                         &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
391                         &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
392                 }
393         }
394 }
395
396 impl fmt::Display for ChannelError {
397         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
398                 match self {
399                         &ChannelError::Ignore(ref e) => write!(f, "{}", e),
400                         &ChannelError::Warn(ref e) => write!(f, "{}", e),
401                         &ChannelError::Close(ref e) => write!(f, "{}", e),
402                 }
403         }
404 }
405
/// Unwraps the `Result` of a secp256k1 operation, evaluating to the success value or
/// early-returning `Err(ChannelError::Close($err))` from the enclosing function on failure.
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
414
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	/// The inner `u8` presumably counts timer ticks spent staged — confirm against
	/// `ChannelManager::timer_tick_occurred`.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	/// The inner `u8` presumably counts timer ticks spent staged — confirm against
	/// `ChannelManager::timer_tick_occurred`.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
431
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
/// Note that, per the variant docs, only `NotSent` and `PeerReceived` ever appear on disk.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
451
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	/// We offered (added) the HTLC.
	LocalOffered,
	/// Our counterparty offered the HTLC.
	RemoteOffered,
}
457
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	/// The number of pending HTLCs.
	pending_htlcs: u32,
	/// The total value of the pending HTLCs, in msat.
	pending_htlcs_value_msat: u64,
	/// Dust exposure, in msat, on the counterparty's commitment transaction.
	on_counterparty_tx_dust_exposure_msat: u64,
	/// Dust exposure, in msat, on our own commitment transaction.
	on_holder_tx_dust_exposure_msat: u64,
	/// Value, in msat, of HTLC updates currently sitting in the holding cell.
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
467
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
479
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	/// The value of the prospective HTLC, in msat.
	amount_msat: u64,
	/// Which side would be adding the HTLC.
	origin: HTLCInitiator,
}
485
486 impl HTLCCandidate {
487         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
488                 Self {
489                         amount_msat,
490                         origin,
491                 }
492         }
493 }
494
/// A return value enum for `get_update_fulfill_htlc`. See [`UpdateFulfillCommitFetch`] variants
/// for description
enum UpdateFulfillFetch {
	/// The HTLC fulfill is new; see [`UpdateFulfillCommitFetch::NewClaim`].
	NewClaim {
		/// The [`ChannelMonitorUpdate`] which places the payment preimage in the channel monitor.
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC being claimed, in msat.
		htlc_value_msat: u64,
		/// The `update_fulfill_htlc` message to send; `None` presumably when the claim went to
		/// the holding cell instead — confirm at the call site.
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	/// The HTLC fulfill is duplicative; see [`UpdateFulfillCommitFetch::DuplicateClaim`].
	DuplicateClaim {},
}
505
/// The return type of `get_update_fulfill_htlc_and_commit`.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The [`ChannelMonitorUpdate`] which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
521
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	/// A `revoke_and_ack` to (re-)send to our counterparty, if any.
	pub raa: Option<msgs::RevokeAndACK>,
	/// A commitment update to (re-)send, if any.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// The order in which `raa` and `commitment_update` should be sent.
	pub order: RAACommitmentOrder,
	// Presumably (HTLC info, u64-id-or-scid) pairs to accept/forward — confirm the u64's meaning
	// at the `monitor_updating_restored` call site.
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	/// HTLCs to fail backwards, with the payment hash and reason for each.
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	// Sources of HTLC claims to finalize — semantics inferred from the name; confirm at call site.
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	/// The funding transaction, if it can now be broadcast.
	pub funding_broadcastable: Option<Transaction>,
	/// A `channel_ready` message to send, if any.
	pub channel_ready: Option<msgs::ChannelReady>,
	/// An `announcement_signatures` message to send, if any.
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
534
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	/// A `channel_ready` to re-send, if any.
	pub channel_ready: Option<msgs::ChannelReady>,
	/// A `revoke_and_ack` to re-send, if any.
	pub raa: Option<msgs::RevokeAndACK>,
	/// A commitment update to re-send, if any.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// The order in which `raa` and `commitment_update` should be sent.
	pub order: RAACommitmentOrder,
	/// An `announcement_signatures` to send, if any.
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	/// A `shutdown` message to (re-)send, if any.
	pub shutdown_msg: Option<msgs::Shutdown>,
}
544
/// The return type of `force_shutdown`
///
/// Contains a tuple with the following:
/// - An optional (`counterparty_node_id`, `funding_txo`, [`ChannelMonitorUpdate`]) tuple
/// - A list of HTLCs to fail back in the form of the (source, payment hash, and this channel's
///   `counterparty_node_id` and `channel_id`).
/// - An optional transaction id identifying a corresponding batch funding transaction.
pub(crate) type ShutdownResult = (
	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	Option<Txid>
);
557
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
// Exposed publicly in test/fuzzing builds so the value can be referenced from tests.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
604
605 /// The number of ticks that may elapse while we're waiting for a response to a
606 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
607 /// them.
608 ///
609 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
610 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
611
612 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
613 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
614 /// exceeding this age limit will be force-closed and purged from memory.
615 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
616
617 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
618 pub(crate) const COINBASE_MATURITY: u32 = 100;
619
/// A [`ChannelMonitorUpdate`] which we have not yet released, stored until it may be applied
/// (see [`ChannelContext::blocked_monitor_updates`]).
struct PendingChannelMonitorUpdate {
	// The blocked update itself.
	update: ChannelMonitorUpdate,
}

// (De)serialize the wrapper as a TLV stream so new fields may be added compatibly later.
impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
627
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	/// An outbound (opened-by-us) V1 channel which has not yet been funded.
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	/// An inbound (opened-by-our-counterparty) V1 channel which has not yet been funded.
	UnfundedInboundV1(InboundV1Channel<SP>),
	/// A channel which has been funded.
	Funded(Channel<SP>),
}
635
636 impl<'a, SP: Deref> ChannelPhase<SP> where
637         SP::Target: SignerProvider,
638         <SP::Target as SignerProvider>::Signer: ChannelSigner,
639 {
640         pub fn context(&'a self) -> &'a ChannelContext<SP> {
641                 match self {
642                         ChannelPhase::Funded(chan) => &chan.context,
643                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
644                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
645                 }
646         }
647
648         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
649                 match self {
650                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
651                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
652                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
653                 }
654         }
655 }
656
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to move the channel to a funded state once this reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
667
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// Note that this mutates state: each call increments the age counter, so it must be called
	/// exactly once per tick.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
678
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	// The channel's current configuration.
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	// If set, overrides our default handshake limits for this (inbound) channel.
	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	// An arbitrary user-provided channel identifier with no protocol meaning; see `get_user_id`.
	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	// The channel's current state, as a bitfield of `ChannelState` flags.
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// many tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	// Cached secp256k1 context.
	secp_ctx: Secp256k1<secp256k1::All>,
	// The channel's total value, in satoshis.
	channel_value_satoshis: u64,

	// The id of the most recent `ChannelMonitorUpdate`; see `get_latest_monitor_update_id`.
	latest_monitor_update_id: u64,

	// Our signer, providing the keys and signatures for this channel.
	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
	// Currently-pending inbound (offered-to-us) HTLCs.
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	// Currently-pending outbound (offered-by-us) HTLCs.
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	// Updates waiting in our "holding cell" to be sent once we can generate new commitments; see
	// the comment on `holding_cell_update_fee` below for the holding-cell concept.
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	// Flags for messages we must (re)send once a pending `ChannelMonitorUpdate` completes —
	// NOTE(review): inferred from the `monitor_pending_` naming; confirm against usage.
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	// The id to be used for the next HTLC we offer / our counterparty offers.
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	// The current feerate, in sat-per-1000-weight (per the `_per_kw` naming convention).
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	// If set, the feerate (sat per 1000 weight) we target for the closing transaction.
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	// The height at which the funding transaction confirmed, or 0 if unconfirmed (or reorged out).
	funding_tx_confirmation_height: u32,
	// The channel's short channel id, once known.
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	// Set if this channel's funding is part of a batch funding transaction.
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
942
943 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	/// Returns the id of the latest [`ChannelMonitorUpdate`].
	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	/// Returns true if this channel is configured to be publicly announced.
	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	/// Returns true if this channel is outbound from us (i.e. we funded it).
	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}
960
	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		// "Usable" requires exactly `ChannelReady` (with no shutdown flags set on either side) and
		// that our `channel_ready` is not still blocked on a pending monitor update.
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}
978
979         /// shutdown state returns the state of the channel in its various stages of shutdown
980         pub fn shutdown_state(&self) -> ChannelShutdownState {
981                 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
982                         return ChannelShutdownState::ShutdownComplete;
983                 }
984                 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 &&  self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
985                         return ChannelShutdownState::ShutdownInitiated;
986                 }
987                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
988                         return ChannelShutdownState::ResolvingHTLCs;
989                 }
990                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
991                         return ChannelShutdownState::NegotiatingClosingFee;
992                 }
993                 return ChannelShutdownState::NotShuttingDown;
994         }
995
	/// Returns true if closing fee negotiation may begin: no pending HTLCs in either direction,
	/// no in-flight fee update, and both shutdown flags set with none of the blocking flags
	/// (awaiting-revoke, peer-disconnected, monitor-update-in-progress) set.
	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() &&
		self.pending_outbound_htlcs.is_empty() &&
		self.pending_update_fee.is_none() &&
		self.channel_state &
		(BOTH_SIDES_SHUTDOWN_MASK |
			ChannelState::AwaitingRemoteRevoke as u32 |
			ChannelState::PeerDisconnected as u32 |
			ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
	}
1006
	/// Returns true if this channel is currently available for use. This is a superset of
	/// is_usable() and considers things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}

	// Public utilities:

	/// Returns the channel's current id.
	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}

	// Return the `temporary_channel_id` used during channel establishment.
	//
	// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}

	/// Returns the channel's `minimum_depth`, if set.
	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}

	/// Gets the "user_id" value passed into the construction of this channel. It has no special
	/// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}

	/// Gets the channel's type
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}
1041
	/// Gets the channel's `short_channel_id`.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}

	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}

	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		// In debug builds, enforce the "only set once, starting from zero" contract above.
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
1066
	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}

	/// Returns the block hash in which our funding transaction was confirmed.
	///
	/// Returns `None` if the funding transaction has yet to confirm.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}
1077
1078         /// Returns the current number of confirmations on the funding transaction.
1079         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1080                 if self.funding_tx_confirmation_height == 0 {
1081                         // We either haven't seen any confirmation yet, or observed a reorg.
1082                         return 0;
1083                 }
1084
1085                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1086         }
1087
	/// Returns the contest delay we selected, from the channel transaction parameters.
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	/// Returns our set of channel public keys.
	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	/// Returns the contest delay selected by our counterparty, if we have their channel
	/// parameters.
	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	/// Returns the counterparty's channel public keys.
	///
	/// Panics if the counterparty's channel parameters have not yet been set.
	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}
1109
	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		// Capped by our own max-HTLC-value-in-flight limit; see `get_htlc_maximum_msat`.
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}
1119
1120         /// Allowed in any state (including after shutdown)
1121         pub fn get_announced_htlc_max_msat(&self) -> u64 {
1122                 return cmp::min(
1123                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1124                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1125                         // channel might have been used to route very small values (either by honest users or as DoS).
1126                         self.channel_value_satoshis * 1000 * 9 / 10,
1127
1128                         self.counterparty_max_htlc_value_in_flight_msat
1129                 );
1130         }
1131
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		// Capped by the counterparty's max-HTLC-value-in-flight limit; see `get_htlc_maximum_msat`.
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}
1141
1142         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1143                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1144                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1145                         cmp::min(
1146                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1147                                 party_max_htlc_value_in_flight_msat
1148                         )
1149                 })
1150         }
1151
	/// Returns the channel's total value, in satoshis.
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}

	/// Returns the configured proportional forwarding fee, in millionths.
	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}

	/// Returns the configured CLTV expiry delta, floored at [`MIN_CLTV_EXPIRY_DELTA`].
	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}
1163
1164         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1165                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1166         where F::Target: FeeEstimator
1167         {
1168                 match self.config.options.max_dust_htlc_exposure {
1169                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1170                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1171                                         ConfirmationTarget::HighPriority);
1172                                 feerate_per_kw as u64 * multiplier
1173                         },
1174                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1175                 }
1176         }
1177
1178         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1179         pub fn prev_config(&self) -> Option<ChannelConfig> {
1180                 self.prev_config.map(|prev_config| prev_config.0)
1181         }
1182
1183         // Checks whether we should emit a `ChannelPending` event.
1184         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1185                 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1186         }
1187
	// Returns whether we already emitted a `ChannelPending` event.
	// Simple accessor for the flag set by `set_channel_pending_event_emitted`.
	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
		self.channel_pending_event_emitted
	}
1192
	// Remembers that we already emitted a `ChannelPending` event, so that
	// `should_emit_channel_pending_event` will not trigger a duplicate.
	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
		self.channel_pending_event_emitted = true;
	}
1197
1198         // Checks whether we should emit a `ChannelReady` event.
1199         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1200                 self.is_usable() && !self.channel_ready_event_emitted
1201         }
1202
	// Remembers that we already emitted a `ChannelReady` event, so that
	// `should_emit_channel_ready_event` will not trigger a duplicate.
	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
		self.channel_ready_event_emitted = true;
	}
1207
1208         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1209         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1210         /// no longer be considered when forwarding HTLCs.
1211         pub fn maybe_expire_prev_config(&mut self) {
1212                 if self.prev_config.is_none() {
1213                         return;
1214                 }
1215                 let prev_config = self.prev_config.as_mut().unwrap();
1216                 prev_config.1 += 1;
1217                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1218                         self.prev_config = None;
1219                 }
1220         }
1221
	/// Returns the current [`ChannelConfig`] applied to the channel.
	///
	/// Returned by value (the options are copied out of `self`).
	pub fn config(&self) -> ChannelConfig {
		self.config.options
	}
1226
1227         /// Updates the channel's config. A bool is returned indicating whether the config update
1228         /// applied resulted in a new ChannelUpdate message.
1229         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1230                 let did_channel_update =
1231                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1232                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1233                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1234                 if did_channel_update {
1235                         self.prev_config = Some((self.config.options, 0));
1236                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1237                         // policy change to propagate throughout the network.
1238                         self.update_time_counter += 1;
1239                 }
1240                 self.config.options = *config;
1241                 did_channel_update
1242         }
1243
1244         /// Returns true if funding_signed was sent/received and the
1245         /// funding transaction has been broadcast if necessary.
1246         pub fn is_funding_broadcast(&self) -> bool {
1247                 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1248                         self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1249         }
1250
	/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
	/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
	/// the transaction. Thus, b will generally be sending a signature over such a transaction to
	/// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to a).
	/// @local is used only to convert relevant internal structures which refer to remote vs local
	/// to decide value of outputs and direction of HTLCs.
	/// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
	/// state may indicate that one peer has informed the other that they'd like to add an HTLC but
	/// have not yet committed it. Such HTLCs will only be included in transactions which are being
	/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
	/// which peer generated this transaction and "to whom" this transaction flows.
	#[inline]
	fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
		where L::Target: Logger
	{
		let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
		let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
		let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);

		// The dust limit of whichever party will broadcast this transaction decides which HTLCs
		// get a real output vs. being "dust" (value folded into fees).
		let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
		let mut remote_htlc_total_msat = 0;
		let mut local_htlc_total_msat = 0;
		let mut value_to_self_msat_offset = 0;

		// A pending fee update applies to this transaction only if it has reached the same state
		// at which an HTLC in that state would be included (mirroring the rules below).
		let mut feerate_per_kw = self.feerate_per_kw;
		if let Some((feerate, update_state)) = self.pending_update_fee {
			if match update_state {
				// Note that these match the inclusion criteria when scanning
				// pending_inbound_htlcs below.
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
			} {
				feerate_per_kw = feerate;
			}
		}

		log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
			commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
			get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
			&self.channel_id,
			if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);

		// Builds an HTLCOutputInCommitment for the given pending HTLC; the output index is
		// assigned later by CommitmentTransaction construction.
		macro_rules! get_htlc_in_commitment {
			($htlc: expr, $offered: expr) => {
				HTLCOutputInCommitment {
					offered: $offered,
					amount_msat: $htlc.amount_msat,
					cltv_expiry: $htlc.cltv_expiry,
					payment_hash: $htlc.payment_hash,
					transaction_output_index: None
				}
			}
		}

		// Classifies an HTLC as non-dust or dust for the broadcaster: an HTLC is dust if its
		// value (less the second-stage claim-tx fee, zero with anchors) is below the
		// broadcaster's dust limit. Offered-ness is relative to the broadcaster ($outbound ==
		// local means the broadcaster offered it).
		macro_rules! add_htlc_output {
			($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
				if $outbound == local { // "offered HTLC output"
					let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				} else {
					let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				}
			}
		}

		// Inbound HTLCs: included depending on how far their add/remove has been committed and
		// which side is generating this transaction.
		for ref htlc in self.pending_inbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
				InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
				InboundHTLCState::Committed => (true, "Committed"),
				InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
			};

			if include {
				add_htlc_output!(htlc, false, None, state_name);
				remote_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match &htlc.state {
					&InboundHTLCState::LocalRemoved(ref reason) => {
						// An inbound HTLC we've fulfilled but not yet fully removed moves its
						// value to our side on transactions we generate.
						if generated_by_local {
							if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
								value_to_self_msat_offset += htlc.amount_msat as i64;
							}
						}
					},
					_ => {},
				}
			}
		}

		let mut preimages: Vec<PaymentPreimage> = Vec::new();

		// Outbound HTLCs: mirror image of the inbound rules above; also collect any preimages
		// the counterparty has already revealed for claimed HTLCs.
		for ref htlc in self.pending_outbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
				OutboundHTLCState::Committed => (true, "Committed"),
				OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
			};

			let preimage_opt = match htlc.state {
				OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
				_ => None,
			};

			if let Some(preimage) = preimage_opt {
				preimages.push(preimage);
			}

			if include {
				add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
				local_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match htlc.state {
					// An outbound HTLC the counterparty has claimed moves its value to their side
					// once that claim is reflected in the transaction being built.
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
						value_to_self_msat_offset -= htlc.amount_msat as i64;
					},
					OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
						if !generated_by_local {
							value_to_self_msat_offset -= htlc.amount_msat as i64;
						}
					},
					_ => {},
				}
			}
		}

		// Our balance: the recorded value to us, minus value locked in our still-pending
		// outbound HTLCs, adjusted by the not-yet-finalized resolutions accumulated above.
		let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
		assert!(value_to_self_msat >= 0);
		// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
		// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
		// "violate" their reserve value by couting those against it. Thus, we have to convert
		// everything to i64 before subtracting as otherwise we can overflow.
		let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
		assert!(value_to_remote_msat >= 0);

		#[cfg(debug_assertions)]
		{
			// Make sure that the to_self/to_remote is always either past the appropriate
			// channel_reserve *or* it is making progress towards it.
			let mut broadcaster_max_commitment_tx_output = if generated_by_local {
				self.holder_max_commitment_tx_output.lock().unwrap()
			} else {
				self.counterparty_max_commitment_tx_output.lock().unwrap()
			};
			debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
			broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
			debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
			broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
		}

		// The channel funder pays the commitment-tx fee and (if applicable) both anchors out of
		// their balance.
		let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
		let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
		let (value_to_self, value_to_remote) = if self.is_outbound() {
			(value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
		} else {
			(value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
		};

		let mut value_to_a = if local { value_to_self } else { value_to_remote };
		let mut value_to_b = if local { value_to_remote } else { value_to_self };
		let (funding_pubkey_a, funding_pubkey_b) = if local {
			(self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
		} else {
			(self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
		};

		// Trim to-broadcaster/to-countersignatory outputs below the broadcaster's dust limit.
		if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
		} else {
			value_to_a = 0;
		}

		if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
		} else {
			value_to_b = 0;
		}

		let num_nondust_htlcs = included_non_dust_htlcs.len();

		let channel_parameters =
			if local { self.channel_transaction_parameters.as_holder_broadcastable() }
			else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
		let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
		                                                             value_to_a as u64,
		                                                             value_to_b as u64,
		                                                             funding_pubkey_a,
		                                                             funding_pubkey_b,
		                                                             keys.clone(),
		                                                             feerate_per_kw,
		                                                             &mut included_non_dust_htlcs,
		                                                             &channel_parameters
		);
		let mut htlcs_included = included_non_dust_htlcs;
		// The unwrap is safe, because all non-dust HTLCs have been assigned an output index
		htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
		htlcs_included.append(&mut included_dust_htlcs);

		// For the stats, trimmed-to-0 the value in msats accordingly
		// NOTE(review): this compares `value_to_self_msat * 1000` (an msat value scaled *up*)
		// against a satoshi dust limit, unlike the `value_to_a`/`value_to_b` sat-denominated
		// check above — confirm the scaling here is intended.
		value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
		value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };

		CommitmentStats {
			tx,
			feerate_per_kw,
			total_fee_sat,
			num_nondust_htlcs,
			htlcs_included,
			local_balance_msat: value_to_self_msat as u64,
			remote_balance_msat: value_to_remote_msat as u64,
			preimages
		}
	}
1497
1498         #[inline]
1499         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1500         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1501         /// our counterparty!)
1502         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1503         /// TODO Some magic rust shit to compile-time check this?
1504         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1505                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1506                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1507                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1508                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1509
1510                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1511         }
1512
1513         #[inline]
1514         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1515         /// will sign and send to our counterparty.
1516         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1517         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1518                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1519                 //may see payments to it!
1520                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1521                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1522                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1523
1524                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1525         }
1526
1527         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1528         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1529         /// Panics if called before accept_channel/InboundV1Channel::new
1530         pub fn get_funding_redeemscript(&self) -> Script {
1531                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1532         }
1533
	// Returns the counterparty's public key for the 2-of-2 funding output.
	fn counterparty_funding_pubkey(&self) -> &PublicKey {
		&self.get_counterparty_pubkeys().funding_pubkey
	}
1537
	/// Returns the current commitment transaction feerate, in satoshis per 1000 weight units.
	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
		self.feerate_per_kw
	}
1541
1542         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1543                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1544                 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1545                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1546                 // more dust balance if the feerate increases when we have several HTLCs pending
1547                 // which are near the dust limit.
1548                 let mut feerate_per_kw = self.feerate_per_kw;
1549                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1550                 // potential feerate updates coming soon.
1551                 if let Some((feerate, _)) = self.pending_update_fee {
1552                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1553                 }
1554                 if let Some(feerate) = outbound_feerate_update {
1555                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1556                 }
1557                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1558         }
1559
1560         /// Get forwarding information for the counterparty.
1561         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1562                 self.counterparty_forwarding_info.clone()
1563         }
1564
	/// Returns a HTLCStats about inbound pending htlcs
	///
	/// Counts and sums all pending inbound HTLCs and computes how much of their value would be
	/// "dust" (no claimable on-chain output) on each side's commitment transaction, using the
	/// buffered feerate from `get_dust_buffer_feerate` to pad second-stage transaction fees.
	fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
		let context = self;
		// Holding-cell fields stay zero here: the holding cell only contains our outbound adds,
		// which are handled by get_outbound_pending_htlc_stats.
		let mut stats = HTLCStats {
			pending_htlcs: context.pending_inbound_htlcs.len() as u32,
			pending_htlcs_value_msat: 0,
			on_counterparty_tx_dust_exposure_msat: 0,
			on_holder_tx_dust_exposure_msat: 0,
			holding_cell_msat: 0,
			on_holder_tx_holding_cell_htlcs_count: 0,
		};

		// With anchors the second-stage HTLC transactions are zero-fee, so no fee buffer is
		// added to the dust limits; otherwise, add the fee of the relevant second-stage tx
		// (timeout vs. success, per the variable names below) at the buffered feerate.
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
		};
		let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
		let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			stats.pending_htlcs_value_msat += htlc.amount_msat;
			// HTLCs below the (buffered) threshold get no output on that commitment tx and so
			// count towards the dust exposure there.
			if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
				stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
			}
			if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
				stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
			}
		}
		stats
	}
1597
1598         /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1599         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1600                 let context = self;
1601                 let mut stats = HTLCStats {
1602                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1603                         pending_htlcs_value_msat: 0,
1604                         on_counterparty_tx_dust_exposure_msat: 0,
1605                         on_holder_tx_dust_exposure_msat: 0,
1606                         holding_cell_msat: 0,
1607                         on_holder_tx_holding_cell_htlcs_count: 0,
1608                 };
1609
1610                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1611                         (0, 0)
1612                 } else {
1613                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1614                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1615                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1616                 };
1617                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1618                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1619                 for ref htlc in context.pending_outbound_htlcs.iter() {
1620                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1621                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1622                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1623                         }
1624                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1625                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1626                         }
1627                 }
1628
1629                 for update in context.holding_cell_htlc_updates.iter() {
1630                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1631                                 stats.pending_htlcs += 1;
1632                                 stats.pending_htlcs_value_msat += amount_msat;
1633                                 stats.holding_cell_msat += amount_msat;
1634                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1635                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1636                                 }
1637                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1638                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1639                                 } else {
1640                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1641                                 }
1642                         }
1643                 }
1644                 stats
1645         }
1646
	/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
	/// Doesn't bother handling the
	/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
	/// corner case properly.
	pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
	-> AvailableBalances
	where F::Target: FeeEstimator
	{
		let context = &self;
		// Note that we have to handle overflow due to the above case.
		let inbound_stats = context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = context.get_outbound_pending_htlc_stats(None);

		// Our total claimable balance: what is currently ours, plus inbound HTLCs we've
		// already claimed (fulfilled but not yet irrevocably removed by the counterparty),
		// minus everything we currently have in flight outbound.
		let mut balance_msat = context.value_to_self_msat;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
				balance_msat += htlc.amount_msat;
			}
		}
		balance_msat -= outbound_stats.pending_htlcs_value_msat;

		// What we could send ignoring fee/dust constraints: our balance less in-flight
		// outbound value and the reserve the counterparty requires us to maintain.
		let outbound_capacity_msat = context.value_to_self_msat
				.saturating_sub(outbound_stats.pending_htlcs_value_msat)
				.saturating_sub(
					context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);

		let mut available_capacity_msat = outbound_capacity_msat;

		if context.is_outbound() {
			// We should mind channel commit tx fee when computing how much of the available capacity
			// can be used in the next htlc. Mirrors the logic in send_htlc.
			//
			// The fee depends on whether the amount we will be sending is above dust or not,
			// and the answer will in turn change the amount itself — making it a circular
			// dependency.
			// This complicates the computation around dust-values, up to the one-htlc-value.
			let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
			}

			// Commitment fee (times the fee-spike buffer multiple) assuming the next HTLC is
			// just-above-dust vs just-below-dust.
			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
			let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
			let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
			let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));

			// We will first subtract the fee as if we were above-dust. Then, if the resulting
			// value ends up being below dust, we have this fee available again. In that case,
			// match the value to right-below-dust.
			let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
			if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
				let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
				debug_assert!(one_htlc_difference_msat != 0);
				capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
				capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
				available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
			} else {
				available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
			}
		} else {
			// If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
			// sending a new HTLC won't reduce their balance below our reserve threshold.
			let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
			}

			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
			let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);

			let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
			let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
				.saturating_sub(inbound_stats.pending_htlcs_value_msat);

			if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
				// If another HTLC's fee would reduce the remote's balance below the reserve limit
				// we've selected for them, we can only send dust HTLCs.
				available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
			}
		}

		let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;

		// If we get close to our maximum dust exposure, we end up in a situation where we can send
		// between zero and the remaining dust exposure limit remaining OR above the dust limit.
		// Because we cannot express this as a simple min/max, we prefer to tell the user they can
		// send above the dust limit (as the router can always overpay to meet the dust limit).
		let mut remaining_msat_below_dust_exposure_limit = None;
		let mut dust_exposure_dust_limit_msat = 0;
		let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);

		// Note: for anchors the per-direction dust limits are used directly (no fee buffer);
		// otherwise the limits are padded by the buffered fee of each HTLC claim tx.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
		} else {
			let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
			(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			 context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
			remaining_msat_below_dust_exposure_limit =
				Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
		}

		let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
			remaining_msat_below_dust_exposure_limit = Some(cmp::min(
				remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
				max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
		}

		if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
			if available_capacity_msat < dust_exposure_dust_limit_msat {
				// Everything we could send would be dust, so cap by the remaining dust budget.
				available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
			} else {
				// Otherwise require the next HTLC to be non-dust so it adds no dust exposure.
				next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
			}
		}

		// Respect the counterparty's max-HTLC-value-in-flight limit...
		available_capacity_msat = cmp::min(available_capacity_msat,
			context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);

		// ...and their max-accepted-HTLCs count: once at the limit, no further HTLC can be added.
		if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
			available_capacity_msat = 0;
		}

		AvailableBalances {
			inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
					- context.value_to_self_msat as i64
					- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
					- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
				0) as u64,
			outbound_capacity_msat,
			next_outbound_htlc_limit_msat: available_capacity_msat,
			next_outbound_htlc_minimum_msat,
			balance_msat,
		}
	}
1787
1788         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1789                 let context = &self;
1790                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1791         }
1792
	/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
	/// number of pending HTLCs that are on track to be in our next commitment tx.
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Only callable on outbound channels, where we (the funder) pay the commitment fee.
		assert!(context.is_outbound());

		// Per-direction fee padding for dust classification: zero with anchors (HTLC txs are
		// zero-fee there), otherwise the fee of the corresponding HTLC claim transaction.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// On *our* commitment tx, inbound HTLCs are claimed via success txs and outbound via
		// timeout txs, both against our own dust limit.
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;

		// Extra (hypothetical) HTLCs to charge for: the optional fee-spike buffer, plus the
		// candidate itself if it would be non-dust on our tx.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			}
		}

		// Count the non-dust HTLCs that will actually appear on our next commitment tx.
		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
				continue
			}
			// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
			// transaction including this HTLC if it times out before they RAA.
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
				continue
			}
			match htlc.state {
				OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
				// transaction won't be generated until they send us their next RAA, which will mean
				// dropping any HTLCs in this state.
				_ => {},
			}
		}

		// Non-dust adds queued in our holding cell will also end up on our next commitment tx.
		for htlc in context.holding_cell_htlc_updates.iter() {
			match htlc {
				&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
					if amount_msat / 1000 < real_dust_limit_timeout_sat {
						continue
					}
					included_htlcs += 1
				},
				_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
					 // ack we're guaranteed to never include them in commitment txs anymore.
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// In test/fuzz builds, cache the fee (without the spike-buffer HTLC) plus the expected
		// HTLC counts/ids so later sanity checks can compare against this prediction.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
				+ context.holding_cell_htlc_updates.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1896
	/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
	/// pending HTLCs that are on track to be in their next commitment tx
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Only callable on inbound channels, where the counterparty (funder) pays the fee on
		// their commitment tx.
		assert!(!context.is_outbound());

		// Per-direction fee padding for dust classification: zero with anchors, otherwise the
		// fee of the corresponding HTLC claim transaction.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// On *their* commitment tx, dust is judged against the counterparty's dust limit; our
		// outbound HTLCs are their success-claims, our inbound HTLCs are their timeout-claims.
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;

		// Extra (hypothetical) HTLCs to charge for: the optional fee-spike buffer, plus the
		// candidate itself if it would be non-dust on their tx.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			}
		}

		// When calculating the set of HTLCs which will be included in their next commitment_signed, all
		// non-dust inbound HTLCs are included (as all states imply it will be included) and only
		// committed outbound HTLCs, see below.
		// NOTE(review): these dust checks use `<=` whereas next_local_commit_tx_fee_msat uses
		// `<` — treating exactly-at-limit HTLCs as dust here is the conservative direction for
		// a fee estimate, but confirm the asymmetry is intentional.
		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
				continue
			}
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
				continue
			}
			// We only include outbound HTLCs if it will not be included in their next commitment_signed,
			// i.e. if they've responded to us with an RAA after announcement.
			match htlc.state {
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
				_ => {},
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// In test/fuzz builds, cache the fee (without the spike-buffer HTLC) plus the expected
		// HTLC counts/ids so later sanity checks can compare against this prediction.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1986
1987         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
1988                 where F: Fn() -> Option<O> {
1989                 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
1990                    self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
1991                         f()
1992                 } else {
1993                         None
1994                 }
1995         }
1996
1997         /// Returns the transaction if there is a pending funding transaction that is yet to be
1998         /// broadcast.
1999         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2000                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2001         }
2002
2003         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2004         /// broadcast.
2005         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2006                 self.if_unbroadcasted_funding(||
2007                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2008                 )
2009         }
2010
2011         /// Returns whether the channel is funded in a batch.
2012         pub fn is_batch_funding(&self) -> bool {
2013                 self.is_batch_funding.is_some()
2014         }
2015
2016         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2017         /// broadcast.
2018         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2019                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2020         }
2021
	/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
	/// shutdown of this channel - no more calls into this Channel may be made afterwards except
	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
	/// immediately (others we will have to allow to time out).
	///
	/// Returns a `ShutdownResult` tuple of (optional force-close monitor update, the dropped
	/// holding-cell HTLCs, and the unbroadcasted batch funding txid if any).
	pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
		// Note that we MUST only generate a monitor update that indicates force-closure - we're
		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, its likely any monitor events we generate will
		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
		// return them to fail the payment.
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
		let counterparty_node_id = self.get_counterparty_node_id();
		for htlc_update in self.holding_cell_htlc_updates.drain(..) {
			match htlc_update {
				HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
					dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
				},
				// Claims/fails in the holding cell need no action: draining simply drops them.
				_ => {}
			}
		}
		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
			// returning a channel monitor update here would imply a channel monitor update before
			// we even registered the channel monitor to begin with, which is invalid.
			// Thus, if we aren't actually at a point where we could conceivably broadcast the
			// funding transaction, don't return a funding txo (which prevents providing the
			// monitor update to the user, even if we return one).
			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
				// Mark the monitor with the sentinel id so this force-close is its final update.
				self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
				Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
					update_id: self.latest_monitor_update_id,
					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
				}))
			} else { None }
		} else { None };
		// Must be read before we flip channel_state below, as the state check inside would
		// otherwise see ShutdownComplete.
		let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();

		self.channel_state = ChannelState::ShutdownComplete as u32;
		self.update_time_counter += 1;
		(monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid)
	}
2068 }
2069
2070 // Internal utility functions for channels
2071
2072 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2073 /// `channel_value_satoshis` in msat, set through
2074 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2075 ///
2076 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2077 ///
2078 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2079 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2080         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2081                 1
2082         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2083                 100
2084         } else {
2085                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2086         };
2087         channel_value_satoshis * 10 * configured_percent
2088 }
2089
2090 /// Returns a minimum channel reserve value the remote needs to maintain,
2091 /// required by us according to the configured or default
2092 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2093 ///
2094 /// Guaranteed to return a value no larger than channel_value_satoshis
2095 ///
2096 /// This is used both for outbound and inbound channels and has lower bound
2097 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2098 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2099         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2100         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2101 }
2102
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value with a 1000 sat floor, capped at the full channel value.
	// Plain integer division replaces the previous `overflowing_div`: unsigned division can
	// never overflow, so the discarded overflow flag was pure noise.
	let one_percent = channel_value_satoshis / 100;
	cmp::min(channel_value_satoshis, cmp::max(one_percent, 1000))
}
2111
2112 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2113 // Note that num_htlcs should not include dust HTLCs.
2114 #[inline]
2115 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2116         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2117 }
2118
2119 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2120 // Note that num_htlcs should not include dust HTLCs.
2121 fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2122         // Note that we need to divide before multiplying to round properly,
2123         // since the lowest denomination of bitcoin on-chain is the satoshi.
2124         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2125 }
2126
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All channel state, configuration, balances and HTLC tracking lives in the shared context.
	pub context: ChannelContext<SP>,
}
2132
// Snapshot of commitment-transaction fee data, compiled only for tests and fuzzing.
// NOTE(review): field names suggest this caches a computed fee together with the HTLC/feerate
// state it was computed against — confirm against its use sites.
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Fee of the cached commitment transaction.
	fee: u64,
	// Number of pending HTLCs at the time the fee was computed.
	total_pending_htlcs: usize,
	// Next holder-offered HTLC id at the time the fee was computed.
	next_holder_htlc_id: u64,
	// Next counterparty-offered HTLC id at the time the fee was computed.
	next_counterparty_htlc_id: u64,
	// Feerate the fee was computed at.
	feerate: u32,
}
2141
2142 impl<SP: Deref> Channel<SP> where
2143         SP::Target: SignerProvider,
2144         <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
2145 {
2146         fn check_remote_fee<F: Deref, L: Deref>(
2147                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2148                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2149         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2150         {
2151                 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2152                 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2153                 // We generally don't care too much if they set the feerate to something very high, but it
2154                 // could result in the channel being useless due to everything being dust. This doesn't
2155                 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2156                 // zero fee, so their fee is no longer considered to determine dust limits.
2157                 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
2158                         let upper_limit = cmp::max(250 * 25,
2159                                 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
2160                         if feerate_per_kw as u64 > upper_limit {
2161                                 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2162                         }
2163                 }
2164
2165                 // We can afford to use a lower bound with anchors than previously since we can now bump
2166                 // fees when broadcasting our commitment. However, we must still make sure we meet the
2167                 // minimum mempool feerate, until package relay is deployed, such that we can ensure the
2168                 // commitment transaction propagates throughout node mempools on its own.
2169                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2170                         ConfirmationTarget::MempoolMinimum
2171                 } else {
2172                         ConfirmationTarget::Background
2173                 };
2174                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2175                 // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
2176                 // occasional issues with feerate disagreements between an initiator that wants a feerate
2177                 // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
2178                 // sat/kw before the comparison here.
2179                 if feerate_per_kw + 250 < lower_limit {
2180                         if let Some(cur_feerate) = cur_feerate_per_kw {
2181                                 if feerate_per_kw > cur_feerate {
2182                                         log_warn!(logger,
2183                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2184                                                 cur_feerate, feerate_per_kw);
2185                                         return Ok(());
2186                                 }
2187                         }
2188                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
2189                 }
2190                 Ok(())
2191         }
2192
2193         #[inline]
2194         fn get_closing_scriptpubkey(&self) -> Script {
2195                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2196                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2197                 // outside of those situations will fail.
2198                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2199         }
2200
2201         #[inline]
2202         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2203                 let mut ret =
2204                 (4 +                                                   // version
2205                  1 +                                                   // input count
2206                  36 +                                                  // prevout
2207                  1 +                                                   // script length (0)
2208                  4 +                                                   // sequence
2209                  1 +                                                   // output count
2210                  4                                                     // lock time
2211                  )*4 +                                                 // * 4 for non-witness parts
2212                 2 +                                                    // witness marker and flag
2213                 1 +                                                    // witness element count
2214                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2215                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2216                 2*(1 + 71);                                            // two signatures + sighash type flags
2217                 if let Some(spk) = a_scriptpubkey {
2218                         ret += ((8+1) +                                    // output values and script length
2219                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2220                 }
2221                 if let Some(spk) = b_scriptpubkey {
2222                         ret += ((8+1) +                                    // output values and script length
2223                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2224                 }
2225                 ret
2226         }
2227
	#[inline]
	/// Builds the mutual-close transaction paying each party its current balance, given a
	/// proposed total fee (paid by the funder). Outputs at or below our dust limit are pruned.
	/// Returns the transaction and the total fee actually paid, which may exceed the proposal
	/// if the funder's balance cannot cover it.
	///
	/// Must only be called once all pending HTLCs and fee updates have been resolved.
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		// Mutual close is only valid with no pending HTLCs or fee updates outstanding.
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		// The funder (the outbound side) pays the closing fee out of their balance.
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		// If the proposed fee exceeds the funder's balance, count the shortfall as additional
		// fee paid.
		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		// Prune dust outputs (and the remote output entirely when the caller requests it).
		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		// Both shutdown scripts must be known by the time we negotiate a close.
		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
2262
	/// Returns the channel's funding outpoint.
	///
	/// Panics if the funding outpoint has not been set yet.
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
2266
2267         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2268         /// entirely.
2269         ///
2270         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2271         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2272         ///
2273         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2274         /// disconnected).
2275         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2276                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2277         where L::Target: Logger {
2278                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2279                 // (see equivalent if condition there).
2280                 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2281                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2282                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2283                 self.context.latest_monitor_update_id = mon_update_id;
2284                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2285                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2286                 }
2287         }
2288
	/// Marks an inbound HTLC as fulfilled with the given preimage.
	///
	/// Always returns a [`ChannelMonitorUpdate`] carrying the preimage (which must be applied so
	/// we can claim on-chain if needed). If the update can be sent to the counterparty
	/// immediately, an [`msgs::UpdateFulfillHTLC`] is included in `msg`; otherwise (we're
	/// awaiting a remote revocation, the peer is disconnected, or a monitor update is in flight)
	/// the claim is queued in the holding cell and `msg` is `None`. Duplicate claims return
	/// [`UpdateFulfillFetch::DuplicateClaim`].
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// either.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		// Locate the HTLC being claimed among our pending inbound HTLCs.
		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				// The preimage must hash to the HTLC's payment hash.
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		// Not blocked: mark the HTLC LocalRemoved and hand back a fulfill message to send.
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
2410
	/// Fulfills an inbound HTLC (as `get_update_fulfill_htlc`) and, when a fulfill message was
	/// generated, also builds the corresponding commitment update, merging both into a single
	/// [`ChannelMonitorUpdate`]. Takes care to keep monitor `update_id`s strictly increasing
	/// when other monitor updates are currently blocked.
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		// Whether a new commitment-signed monitor update may be released immediately.
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					// Insert the preimage update ahead of the blocked queue: it takes the id of
					// the first blocked update (or keeps its own if none), and every blocked
					// update shifts up by one.
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
2448
2449         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2450         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2451         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2452         /// before we fail backwards.
2453         ///
2454         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2455         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2456         /// [`ChannelError::Ignore`].
2457         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2458         -> Result<(), ChannelError> where L::Target: Logger {
2459                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2460                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2461         }
2462
2463         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2464         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2465         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2466         /// before we fail backwards.
2467         ///
2468         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2469         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2470         /// [`ChannelError::Ignore`].
2471         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2472         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2473                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2474                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2475                 }
2476                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2477
2478                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2479                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2480                 // these, but for now we just have to treat them as normal.
2481
2482                 let mut pending_idx = core::usize::MAX;
2483                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2484                         if htlc.htlc_id == htlc_id_arg {
2485                                 match htlc.state {
2486                                         InboundHTLCState::Committed => {},
2487                                         InboundHTLCState::LocalRemoved(ref reason) => {
2488                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2489                                                 } else {
2490                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2491                                                 }
2492                                                 return Ok(None);
2493                                         },
2494                                         _ => {
2495                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2496                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2497                                         }
2498                                 }
2499                                 pending_idx = idx;
2500                         }
2501                 }
2502                 if pending_idx == core::usize::MAX {
2503                         #[cfg(any(test, fuzzing))]
2504                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2505                         // is simply a duplicate fail, not previously failed and we failed-back too early.
2506                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2507                         return Ok(None);
2508                 }
2509
2510                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2511                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2512                         force_holding_cell = true;
2513                 }
2514
2515                 // Now update local state:
2516                 if force_holding_cell {
2517                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2518                                 match pending_update {
2519                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2520                                                 if htlc_id_arg == htlc_id {
2521                                                         #[cfg(any(test, fuzzing))]
2522                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2523                                                         return Ok(None);
2524                                                 }
2525                                         },
2526                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2527                                                 if htlc_id_arg == htlc_id {
2528                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2529                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2530                                                 }
2531                                         },
2532                                         _ => {}
2533                                 }
2534                         }
2535                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2536                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2537                                 htlc_id: htlc_id_arg,
2538                                 err_packet,
2539                         });
2540                         return Ok(None);
2541                 }
2542
2543                 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2544                 {
2545                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2546                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2547                 }
2548
2549                 Ok(Some(msgs::UpdateFailHTLC {
2550                         channel_id: self.context.channel_id(),
2551                         htlc_id: htlc_id_arg,
2552                         reason: err_packet
2553                 }))
2554         }
2555
2556         // Message handlers:
2557
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	pub fn funding_signed<L: Deref>(
		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
	where
		L::Target: Logger
	{
		// Only the channel funder (the outbound side) ever receives funding_signed.
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
		}
		// We must still be in FundingCreated, ignoring only the monitor-update-in-progress flag.
		if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
		}
		// Sanity-check that neither side's commitment number has advanced yet. If it has,
		// internal state is corrupt, so panic rather than limp along.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		// Rebuild the counterparty's initial commitment transaction so we can seed the
		// new ChannelMonitor with it below.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		// Build our own initial commitment transaction and verify the counterparty's
		// signature over it before we consider the channel funded.
		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
			}
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give our signer a chance to reject the commitment (e.g. an external signer
		// applying its own policy checks).
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;


		// Construct the initial ChannelMonitor which will watch the chain for this channel.
		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		                                          shutdown_script, self.context.get_holder_selected_contest_delay(),
		                                          &self.context.destination_script, (funding_txo, funding_txo_script),
		                                          &self.context.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);

		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail update!
		// If this channel is part of a batch funding, gate broadcast-readiness on all
		// channels in the batch persisting their monitors (cleared via `set_batch_ready`).
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
		} else {
			self.context.channel_state = ChannelState::FundingSent as u32;
		}
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let need_channel_ready = self.check_get_channel_ready(0).is_some();
		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok(channel_monitor)
	}
2650
2651         /// Updates the state of the channel to indicate that all channels in the batch have received
2652         /// funding_signed and persisted their monitors.
2653         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2654         /// treated as a non-batch channel going forward.
2655         pub fn set_batch_ready(&mut self) {
2656                 self.context.is_batch_funding = None;
2657                 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2658         }
2659
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// If the peer is marked disconnected we expect a channel_reestablish first. lnd has a
		// known bug where it sends channel_ready anyway, so stash the message for replay after
		// the reestablish dance completes instead of closing.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		debug_assert!(
			non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
			non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
		);
		if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
			// Theirs is the first channel_ready: just record that they're ready.
			self.context.channel_state |= ChannelState::TheirChannelReady as u32;
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We had already sent ours; both sides are now ready, so the channel is.
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
		} else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
			// If we reconnected before sending our `channel_ready` they may still resend theirs:
			(self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
			                      (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
		{
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
							&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
						).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			// A duplicate channel_ready on reconnect carries no new state; nothing more to do.
			return Ok(None);
		} else {
			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
		}

		// Rotate their per-commitment points: the point they sent previously becomes "prev",
		// and the one in this message becomes "cur".
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
	}
2737
	/// Handles an inbound update_add_htlc message from our counterparty, validating it against
	/// our channel state, advertised limits, the channel reserve and our dust exposure before
	/// recording it in `pending_inbound_htlcs`.
	///
	/// `create_pending_htlc_status` converts the pending forwarding decision into a failure
	/// (with the given BOLT 4 failure code) when the HTLC should be failed back to the sender
	/// rather than the channel closed.
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		// We can't accept HTLCs sent after we've sent a shutdown.
		let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if local_sent_shutdown {
			// 0x4000|8 == PERM|permanent_channel_failure per BOLT 4.
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if remote_sent_shutdown {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		// Basic amount sanity checks against channel value and our advertised htlc_minimum.
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		// Enforce our advertised limits on pending HTLC count and total in-flight value.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}
		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		// Compute per-HTLC dust thresholds for each commitment tx. With anchors +
		// zero-fee HTLC transactions there is no second-stage fee, so no feerate buffer
		// is added on top of the dust limits.
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		// If this HTLC would be dust on the counterparty's commitment tx, ensure accepting it
		// keeps our total dust exposure within bounds; if not, fail it back with
		// 0x1000|7 (UPDATE|temporary_channel_failure) rather than closing the channel.
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// Same dust-exposure check, but against our own commitment transaction.
		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// What the counterparty can still spend: their side of the channel, with pending
		// inbound HTLCs counted against us and fulfilled-but-unrevoked outbound HTLCs
		// credited back to them (see the long reserve comment above).
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
		};
		// The overdraw check above guarantees this subtraction cannot underflow.
		if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
			return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
		};

		if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
			return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
		}

		if !self.context.is_outbound() {
			// `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because in the spec, the fee spike buffer requirement doesn't exist on the
			// receiver's side, only on the sender's.
			// Note that when we eventually remove support for fee updates and switch to anchor output
			// fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
			// the extra htlc when calculating the next remote commitment transaction fee as we should
			// still be able to afford adding this HTLC plus one more future HTLC, regardless of being
			// sensitive to fee spikes.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}
		// HTLC IDs must be strictly sequential per the protocol.
		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		// cltv_expiry is a block height; values >= 500000000 are interpreted as unix
		// timestamps in Bitcoin locktimes, so reject them outright.
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		// After we've sent shutdown, the ChannelManager must only route non-forwardable
		// (failing) HTLCs to us; a Forward here is a local logic error, not a peer error.
		if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
2896
2897         /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
2898         #[inline]
2899         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
2900                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
2901                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
2902                         if htlc.htlc_id == htlc_id {
2903                                 let outcome = match check_preimage {
2904                                         None => fail_reason.into(),
2905                                         Some(payment_preimage) => {
2906                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
2907                                                 if payment_hash != htlc.payment_hash {
2908                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
2909                                                 }
2910                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
2911                                         }
2912                                 };
2913                                 match htlc.state {
2914                                         OutboundHTLCState::LocalAnnounced(_) =>
2915                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
2916                                         OutboundHTLCState::Committed => {
2917                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
2918                                         },
2919                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
2920                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
2921                                 }
2922                                 return Ok(htlc);
2923                         }
2924                 }
2925                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
2926         }
2927
2928         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2929                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2930                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2931                 }
2932                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2933                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
2934                 }
2935
2936                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
2937         }
2938
2939         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2940                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2941                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
2942                 }
2943                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2944                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
2945                 }
2946
2947                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2948                 Ok(())
2949         }
2950
2951         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2952                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2953                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
2954                 }
2955                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2956                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
2957                 }
2958
2959                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2960                 Ok(())
2961         }
2962
	/// Handles an inbound `commitment_signed` from our counterparty.
	///
	/// Verifies the counterparty's signature over our next holder commitment transaction and
	/// over each non-dust HTLC transaction, advances our HTLC/fee-update state machines, and
	/// builds a `ChannelMonitorUpdate` persisting the new holder commitment. Returns the
	/// monitor update (if one is releasable now; `None` if it was queued as blocked).
	///
	/// Errors with `ChannelError::Close` when the channel is not operational, the peer is
	/// mid-reestablish, we have already begun exchanging `closing_signed`s, any signature is
	/// invalid, or (for an inbound fee update) the counterparty cannot afford the new fee.
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		// Rebuild the holder commitment transaction locally and check the counterparty's
		// signature against its sighash before touching any state.
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		// Test/fuzz-only sanity check: the fee we computed matches the fee projected by the
		// cached next-local-commitment fee info, if the HTLC set is unchanged.
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
						}
				}
			}
		}

		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		// Verify the counterparty's signature on each non-dust HTLC transaction; dust HTLCs
		// have no on-chain transaction and thus no signature to check.
		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				// Anchor channels sign HTLC txs with SINGLE|ANYONECANPAY so fees can be attached later.
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		// Assemble the fully-signed holder commitment and let the signer validate it before we
		// commit to the new state.
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		// Newly-announced inbound HTLCs are now committed by this signature, so advance them
		// towards being forwardable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		// Similarly, outbound HTLCs the remote has removed advance one step; track any
		// claimed preimages for inclusion in the monitor update.
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim anyway.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}]
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that get_last_commitment_update
				// includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
3183
3184         /// Public version of the below, checking relevant preconditions first.
3185         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3186         /// returns `(None, Vec::new())`.
3187         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3188                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3189         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3190         where F::Target: FeeEstimator, L::Target: Logger
3191         {
3192                 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3193                    (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3194                         self.free_holding_cell_htlcs(fee_estimator, logger)
3195                 } else { (None, Vec::new()) }
3196         }
3197
3198         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3199         /// for our counterparty.
3200         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3201                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3202         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3203         where F::Target: FeeEstimator, L::Target: Logger
3204         {
3205                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3206                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3207                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3208                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3209
3210                         let mut monitor_update = ChannelMonitorUpdate {
3211                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3212                                 updates: Vec::new(),
3213                         };
3214
3215                         let mut htlc_updates = Vec::new();
3216                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3217                         let mut update_add_count = 0;
3218                         let mut update_fulfill_count = 0;
3219                         let mut update_fail_count = 0;
3220                         let mut htlcs_to_fail = Vec::new();
3221                         for htlc_update in htlc_updates.drain(..) {
3222                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
3223                                 // fee races with adding too many outputs which push our total payments just over
3224                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
3225                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3226                                 // to rebalance channels.
3227                                 match &htlc_update {
3228                                         &HTLCUpdateAwaitingACK::AddHTLC {
3229                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3230                                                 skimmed_fee_msat, ..
3231                                         } => {
3232                                                 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3233                                                         onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3234                                                 {
3235                                                         Ok(_) => update_add_count += 1,
3236                                                         Err(e) => {
3237                                                                 match e {
3238                                                                         ChannelError::Ignore(ref msg) => {
3239                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3240                                                                                 // If we fail to send here, then this HTLC should
3241                                                                                 // be failed backwards. Failing to send here
3242                                                                                 // indicates that this HTLC may keep being put back
3243                                                                                 // into the holding cell without ever being
3244                                                                                 // successfully forwarded/failed/fulfilled, causing
3245                                                                                 // our counterparty to eventually close on us.
3246                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3247                                                                         },
3248                                                                         _ => {
3249                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3250                                                                         },
3251                                                                 }
3252                                                         }
3253                                                 }
3254                                         },
3255                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3256                                                 // If an HTLC claim was previously added to the holding cell (via
3257                                                 // `get_update_fulfill_htlc`, then generating the claim message itself must
3258                                                 // not fail - any in between attempts to claim the HTLC will have resulted
3259                                                 // in it hitting the holding cell again and we cannot change the state of a
3260                                                 // holding cell HTLC from fulfill to anything else.
3261                                                 let mut additional_monitor_update =
3262                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3263                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3264                                                         { monitor_update } else { unreachable!() };
3265                                                 update_fulfill_count += 1;
3266                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3267                                         },
3268                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3269                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3270                                                         Ok(update_fail_msg_option) => {
3271                                                                 // If an HTLC failure was previously added to the holding cell (via
3272                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3273                                                                 // not fail - we should never end up in a state where we double-fail
3274                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3275                                                                 // for a full revocation before failing.
3276                                                                 debug_assert!(update_fail_msg_option.is_some());
3277                                                                 update_fail_count += 1;
3278                                                         },
3279                                                         Err(e) => {
3280                                                                 if let ChannelError::Ignore(_) = e {}
3281                                                                 else {
3282                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3283                                                                 }
3284                                                         }
3285                                                 }
3286                                         },
3287                                 }
3288                         }
3289                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3290                                 return (None, htlcs_to_fail);
3291                         }
3292                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3293                                 self.send_update_fee(feerate, false, fee_estimator, logger)
3294                         } else {
3295                                 None
3296                         };
3297
3298                         let mut additional_update = self.build_commitment_no_status_check(logger);
3299                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3300                         // but we want them to be strictly increasing by one, so reset it here.
3301                         self.context.latest_monitor_update_id = monitor_update.update_id;
3302                         monitor_update.updates.append(&mut additional_update.updates);
3303
3304                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3305                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3306                                 update_add_count, update_fulfill_count, update_fail_count);
3307
3308                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3309                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3310                 } else {
3311                         (None, Vec::new())
3312                 }
3313         }
3314
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	///
	/// Returns the HTLCs to fail back to their sources and, unless monitor updates are blocked or
	/// `hold_mon_update` is set, the [`ChannelMonitorUpdate`] carrying the revealed commitment
	/// secret (plus any additional commitment update generated in the same pass).
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		// A revoke_and_ack is only valid on a ready, connected channel that has not yet begun
		// exchanging closing_signed messages.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		// The revealed secret must derive the per-commitment point the counterparty previously
		// advertised for the state they are now revoking.
		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked claim). But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		// Test-only fee caches may be stale once the remote state advances; drop them.
		#[cfg(any(test, fuzzing))]
		{
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		// Give the signer a chance to veto the revocation before we commit to it.
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			}
		};

		// Archive the revealed secret and stage a monitor update carrying it, so the monitor can
		// punish any later broadcast of the revoked state.
		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.
		self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		// Net change to our balance from HTLCs resolved by this revocation (can be negative).
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;

			// We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
			// First pass: drop HTLCs that are now fully removed on both commitments.
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			// Second pass: advance remaining inbound HTLCs one step in their state machine now
			// that the remote has revoked.
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		// An in-flight fee update may also be committed by this revocation.
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		// Whether the monitor update can be handed to the caller now, or must be queued behind
		// previously-blocked updates (or explicitly held by the caller via hold_mon_update).
		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call get_last_commitment_update(),
				// which does not update state, but we're definitely now awaiting a remote revoke
				// before we can step forward any more, so set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		// Now that the remote has revoked, try to flush any updates queued in the holding cell;
		// if that produces a commitment update we fold it into our monitor update.
		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
3582
3583         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3584         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3585         /// commitment update.
3586         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3587                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3588         where F::Target: FeeEstimator, L::Target: Logger
3589         {
3590                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3591                 assert!(msg_opt.is_none(), "We forced holding cell?");
3592         }
3593
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on the optionness of the return value.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, or if the new feerate would push either side's dust exposure over our
	/// configured limit, the update is cancelled and `None` is returned.
	///
	/// Panics if called on an inbound channel, before the channel is usable, or while the peer
	/// is disconnected / a monitor update is in flight (the caller must check `is_live`).
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		// HTLC stats are computed at the *proposed* feerate, not the current one.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		// Fee budget assumes all holding-cell HTLCs land as non-dust outputs, plus a buffer of
		// CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLCs the peer may add concurrently.
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		// Our effective balance, after subtracting amounts already committed in the holding cell.
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		// We must still be able to cover the buffered fee *and* keep the counterparty-selected
		// channel reserve intact at the new feerate.
		if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		// While awaiting the remote's revoke_and_ack or a monitor update completion we cannot
		// send a new update, so the fee update must go into the holding cell.
		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		// Only one fee update may be in flight at a time.
		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}
3660
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		// Channels which never reached FundingSent have nothing worth pausing - tell the caller
		// to just shut the channel down instead.
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			return Err(());
		}

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		// If we'd previously sent (or committed to) announcement_signatures, we'll need to
		// re-send them on reconnect, so roll the state back to NotSent.
		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		// Drop inbound HTLCs the counterparty announced but never committed to; everything else
		// survives the disconnect (see per-state comments below).
		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		// Each dropped inbound HTLC consumed a counterparty htlc_id; roll the counter back so
		// the same IDs are expected when the counterparty re-sends the HTLCs.
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		// An update_fee the counterparty announced without a commitment_signed is dropped just
		// like a RemoteAnnounced HTLC - they'll re-send it after reconnecting.
		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				// Only the funder sends update_fee, and we checked above we're not the funder.
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
3739
3740         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3741         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3742         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3743         /// update completes (potentially immediately).
3744         /// The messages which were generated with the monitor update must *not* have been sent to the
3745         /// remote end, and must instead have been dropped. They will be regenerated when
3746         /// [`Self::monitor_updating_restored`] is called.
3747         ///
3748         /// [`ChannelManager`]: super::channelmanager::ChannelManager
3749         /// [`chain::Watch`]: crate::chain::Watch
3750         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3751         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3752                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3753                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3754                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3755         ) {
3756                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3757                 self.context.monitor_pending_commitment_signed |= resend_commitment;
3758                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3759                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3760                 self.context.monitor_pending_failures.append(&mut pending_fails);
3761                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3762                 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3763         }
3764
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		// Must only be called while a monitor update is actually pending; clear the flag now.
		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);

		// Hand back any HTLC resolutions which were queued up (via `monitor_updating_paused`)
		// while the monitor update was in flight, draining the pending queues.
		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		// If the peer is disconnected we can't send them any messages now; drop the pending
		// resend flags - what needs retransmitting will be re-derived during the
		// channel_reestablish dance on reconnect.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		// Regenerate the messages we couldn't send while the monitor update was pending.
		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.mark_awaiting_response();
			Some(self.get_last_commitment_update(logger))
		} else { None };

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
3847
	/// Handles an inbound `update_fee` from our counterparty (who must be the channel funder),
	/// validating the new feerate and checking it won't push our dust exposure over its limit.
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// Only the funder may send update_fee; if we're outbound, we are the funder.
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
		// NB: this is deliberately computed against the *pre-update* dust buffer, before we
		// record the new feerate below (`get_dust_buffer_feerate` reads `pending_update_fee`).
		let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// If the feerate has increased over the previous dust buffer (note that
		// `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
		// won't be pushed over our dust exposure limit by the feerate increase.
		if feerate_over_dust_buffer {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
3882
3883         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
3884                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3885                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
3886                 msgs::RevokeAndACK {
3887                         channel_id: self.context.channel_id,
3888                         per_commitment_secret,
3889                         next_per_commitment_point,
3890                         #[cfg(taproot)]
3891                         next_local_nonce: None,
3892                 }
3893         }
3894
3895         fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
3896                 let mut update_add_htlcs = Vec::new();
3897                 let mut update_fulfill_htlcs = Vec::new();
3898                 let mut update_fail_htlcs = Vec::new();
3899                 let mut update_fail_malformed_htlcs = Vec::new();
3900
3901                 for htlc in self.context.pending_outbound_htlcs.iter() {
3902                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
3903                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
3904                                         channel_id: self.context.channel_id(),
3905                                         htlc_id: htlc.htlc_id,
3906                                         amount_msat: htlc.amount_msat,
3907                                         payment_hash: htlc.payment_hash,
3908                                         cltv_expiry: htlc.cltv_expiry,
3909                                         onion_routing_packet: (**onion_packet).clone(),
3910                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
3911                                 });
3912                         }
3913                 }
3914
3915                 for htlc in self.context.pending_inbound_htlcs.iter() {
3916                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3917                                 match reason {
3918                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
3919                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
3920                                                         channel_id: self.context.channel_id(),
3921                                                         htlc_id: htlc.htlc_id,
3922                                                         reason: err_packet.clone()
3923                                                 });
3924                                         },
3925                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
3926                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
3927                                                         channel_id: self.context.channel_id(),
3928                                                         htlc_id: htlc.htlc_id,
3929                                                         sha256_of_onion: sha256_of_onion.clone(),
3930                                                         failure_code: failure_code.clone(),
3931                                                 });
3932                                         },
3933                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
3934                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
3935                                                         channel_id: self.context.channel_id(),
3936                                                         htlc_id: htlc.htlc_id,
3937                                                         payment_preimage: payment_preimage.clone(),
3938                                                 });
3939                                         },
3940                                 }
3941                         }
3942                 }
3943
3944                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
3945                         Some(msgs::UpdateFee {
3946                                 channel_id: self.context.channel_id(),
3947                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
3948                         })
3949                 } else { None };
3950
3951                 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
3952                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
3953                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
3954                 msgs::CommitmentUpdate {
3955                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
3956                         commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
3957                 }
3958         }
3959
3960         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
3961         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
3962                 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
3963                         assert!(self.context.shutdown_scriptpubkey.is_some());
3964                         Some(msgs::Shutdown {
3965                                 channel_id: self.context.channel_id,
3966                                 scriptpubkey: self.get_closing_scriptpubkey(),
3967                         })
3968                 } else { None }
3969         }
3970
3971         /// May panic if some calls other than message-handling calls (which will all Err immediately)
3972         /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
3973         ///
3974         /// Some links printed in log lines are included here to check them during build (when run with
3975         /// `cargo doc --document-private-items`):
3976         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
3977         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
3978         pub fn channel_reestablish<L: Deref, NS: Deref>(
3979                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
3980                 genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
3981         ) -> Result<ReestablishResponses, ChannelError>
3982         where
3983                 L::Target: Logger,
3984                 NS::Target: NodeSigner
3985         {
3986                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
3987                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
3988                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
3989                         // just close here instead of trying to recover.
3990                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
3991                 }
3992
3993                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
3994                         msg.next_local_commitment_number == 0 {
3995                         return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
3996                 }
3997
3998                 if msg.next_remote_commitment_number > 0 {
3999                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4000                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4001                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4002                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4003                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4004                         }
4005                         if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4006                                 macro_rules! log_and_panic {
4007                                         ($err_msg: expr) => {
4008                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4009                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4010                                         }
4011                                 }
4012                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4013                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4014                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4015                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4016                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4017                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4018                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4019                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4020                         }
4021                 }
4022
4023                 // Before we change the state of the channel, we check if the peer is sending a very old
4024                 // commitment transaction number, if yes we send a warning message.
4025                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4026                 if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4027                         return Err(
4028                                 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4029                         );
4030                 }
4031
4032                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4033                 // remaining cases either succeed or ErrorMessage-fail).
4034                 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4035                 self.context.sent_message_awaiting_response = None;
4036
4037                 let shutdown_msg = self.get_outbound_shutdown();
4038
4039                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
4040
4041                 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4042                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4043                         if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4044                                         self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4045                                 if msg.next_remote_commitment_number != 0 {
4046                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4047                                 }
4048                                 // Short circuit the whole handler as there is nothing we can resend them
4049                                 return Ok(ReestablishResponses {
4050                                         channel_ready: None,
4051                                         raa: None, commitment_update: None,
4052                                         order: RAACommitmentOrder::CommitmentFirst,
4053                                         shutdown_msg, announcement_sigs,
4054                                 });
4055                         }
4056
4057                         // We have OurChannelReady set!
4058                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4059                         return Ok(ReestablishResponses {
4060                                 channel_ready: Some(msgs::ChannelReady {
4061                                         channel_id: self.context.channel_id(),
4062                                         next_per_commitment_point,
4063                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4064                                 }),
4065                                 raa: None, commitment_update: None,
4066                                 order: RAACommitmentOrder::CommitmentFirst,
4067                                 shutdown_msg, announcement_sigs,
4068                         });
4069                 }
4070
4071                 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4072                         // Remote isn't waiting on any RevokeAndACK from us!
4073                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4074                         None
4075                 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4076                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4077                                 self.context.monitor_pending_revoke_and_ack = true;
4078                                 None
4079                         } else {
4080                                 Some(self.get_last_revoke_and_ack())
4081                         }
4082                 } else {
4083                         return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4084                 };
4085
4086                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4087                 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4088                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4089                 // the corresponding revoke_and_ack back yet.
4090                 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4091                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4092                         self.mark_awaiting_response();
4093                 }
4094                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4095
4096                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4097                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4098                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4099                         Some(msgs::ChannelReady {
4100                                 channel_id: self.context.channel_id(),
4101                                 next_per_commitment_point,
4102                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4103                         })
4104                 } else { None };
4105
4106                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4107                         if required_revoke.is_some() {
4108                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4109                         } else {
4110                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4111                         }
4112
4113                         Ok(ReestablishResponses {
4114                                 channel_ready, shutdown_msg, announcement_sigs,
4115                                 raa: required_revoke,
4116                                 commitment_update: None,
4117                                 order: self.context.resend_order.clone(),
4118                         })
4119                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4120                         if required_revoke.is_some() {
4121                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4122                         } else {
4123                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4124                         }
4125
4126                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4127                                 self.context.monitor_pending_commitment_signed = true;
4128                                 Ok(ReestablishResponses {
4129                                         channel_ready, shutdown_msg, announcement_sigs,
4130                                         commitment_update: None, raa: None,
4131                                         order: self.context.resend_order.clone(),
4132                                 })
4133                         } else {
4134                                 Ok(ReestablishResponses {
4135                                         channel_ready, shutdown_msg, announcement_sigs,
4136                                         raa: required_revoke,
4137                                         commitment_update: Some(self.get_last_commitment_update(logger)),
4138                                         order: self.context.resend_order.clone(),
4139                                 })
4140                         }
4141                 } else {
4142                         Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4143                 }
4144         }
4145
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		// If we already picked limits for this negotiation, keep returning the same ones so our
		// closing_signed proposals stay consistent across rounds.
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
		// If we're not the funder we don't pay the closing fee, so we have no feerate cap of our
		// own -- the funder is only bounded by their full balance (computed below).
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			// As the funder, lift our minimum to the user-provided target; otherwise don't push
			// the minimum above the current commitment feerate.
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one sides' balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funders' output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				// Non-funder: cap at the counterparty's entire balance (their msat balance rounded
				// up to a whole satoshi) since the fee comes out of their funds, not ours.
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		// Cache so subsequent calls during this negotiation return identical limits.
		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
4196
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		// Thin wrapper -- the actual state checks live on the ChannelContext.
		self.context.closing_negotiation_ready()
	}
4204
4205         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4206         /// an Err if no progress is being made and the channel should be force-closed instead.
4207         /// Should be called on a one-minute timer.
4208         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4209                 if self.closing_negotiation_ready() {
4210                         if self.context.closing_signed_in_flight {
4211                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4212                         } else {
4213                                 self.context.closing_signed_in_flight = true;
4214                         }
4215                 }
4216                 Ok(())
4217         }
4218
	/// If closing negotiation is ready and we're the funder, builds and signs our initial
	/// `closing_signed` proposal (at our minimum acceptable fee), returning the message to send.
	/// Returns `(None, None)` when there's nothing to do: we've already proposed a fee,
	/// negotiation isn't ready, or (as the non-funder) we're waiting for their proposal.
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None));
		}

		if !self.context.is_outbound() {
			// We're not the funder, so the counterparty proposes first. If they already sent us a
			// closing_signed (buffered while a monitor update was in flight), process it now.
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		// Open with our minimum fee; the negotiation can only move upward from there.
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				// Record what we proposed so closing_signed() can detect acceptance/progress.
				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None))
			}
		}
	}
4261
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// a reconnection.
	fn mark_awaiting_response(&mut self) {
		// Reset the tick counter; should_disconnect_peer_awaiting_response() increments it.
		self.context.sent_message_awaiting_response = Some(0);
	}
4268
4269         /// Determines whether we should disconnect the counterparty due to not receiving a response
4270         /// within our expected timeframe.
4271         ///
4272         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4273         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4274                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4275                         ticks_elapsed
4276                 } else {
4277                         // Don't disconnect when we're not waiting on a response.
4278                         return false;
4279                 };
4280                 *ticks_elapsed += 1;
4281                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4282         }
4283
	/// Handles an incoming `shutdown` from our counterparty: validates their closing script,
	/// records that both sides are shutting down, frees (fails back) any holding-cell HTLC adds,
	/// and returns our own `shutdown` (if not yet sent), a monitor update recording our closing
	/// script (if we just picked one), and the HTLCs dropped from the holding cell.
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		// Reject a shutdown that races with an HTLC the peer announced but we haven't committed.
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			// A re-sent shutdown (e.g. after reconnection) must match their original script.
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;

		// If we haven't yet committed to a closing script of our own, fetch one from the signer
		// provider now and validate it against the peer's features; this requires persisting it
		// via a monitor update below.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				// Drop queued adds and hand them back to the caller to fail the payment; other
				// queued updates (fulfills/fails) are kept.
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
4380
	/// Assembles the fully-signed cooperative close transaction by filling in the funding input's
	/// witness with both parties' signatures and the funding redeemscript.
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		// Both signatures are DER-encoded with the SIGHASH_ALL byte appended.
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
		// Witness signatures must appear in the same order as the pubkeys in the 2-of-2 funding
		// redeemscript, which (per BOLT 3) sorts the serialized funding pubkeys lexicographically.
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		// Final witness element is the redeemscript itself (P2WSH spend).
		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
4403
	/// Handles an incoming `closing_signed` from our counterparty: verifies their signature on
	/// the proposed closing transaction, then either accepts their fee (returning the fully-signed
	/// transaction to broadcast) or replies with a counter-proposal per the BOLT 2 fee
	/// negotiation rules.
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
		where F::Target: FeeEstimator
	{
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		// As the funder we always open the negotiation (see maybe_propose_closing_signed).
		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
			// Buffer the message until the monitor update completes;
			// maybe_propose_closing_signed will pick it back up.
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_e) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};

		for outp in closing_tx.trust().built_transaction().output.iter() {
			// Refuse to co-sign a transaction with a non-segwit output below the standardness
			// dust limit, as it may fail to relay.
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				// They accepted our last proposal verbatim -- combine signatures and we're done.
				let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete as u32;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx)));
			}
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		// Builds, signs, and returns a closing_signed at $new_fee. If $new_fee equals the peer's
		// proposal we've reached agreement, so the channel is marked shutdown-complete and the
		// fully-signed transaction is returned for broadcast.
		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					// Reuse the transaction we already built and verified above.
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;

						let signed_tx = if $new_fee == msg.fee_satoshis {
							self.context.channel_state = ChannelState::ShutdownComplete as u32;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							Some(tx)
						} else { None };

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx))
					}
				}
			}
		}

		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			// Modern fee-range negotiation: their proposal must be internally consistent and the
			// ranges must overlap.
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					// They want more than our last offer: accept theirs if it's under our max,
					// otherwise counter at our max (or fail if we already proposed it).
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					// Mirror image: they want less, so clamp at our minimum.
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				// First proposal we've seen (and we haven't proposed): clamp it into our range.
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
4559
4560         fn internal_htlc_satisfies_config(
4561                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4562         ) -> Result<(), (&'static str, u16)> {
4563                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4564                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4565                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4566                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4567                         return Err((
4568                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4569                                 0x1000 | 12, // fee_insufficient
4570                         ));
4571                 }
4572                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4573                         return Err((
4574                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4575                                 0x1000 | 13, // incorrect_cltv_expiry
4576                         ));
4577                 }
4578                 Ok(())
4579         }
4580
4581         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4582         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4583         /// unsuccessful, falls back to the previous one if one exists.
4584         pub fn htlc_satisfies_config(
4585                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4586         ) -> Result<(), (&'static str, u16)> {
4587                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4588                         .or_else(|err| {
4589                                 if let Some(prev_config) = self.context.prev_config() {
4590                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4591                                 } else {
4592                                         Err(err)
4593                                 }
4594                         })
4595         }
4596
4597         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4598                 self.context.cur_holder_commitment_transaction_number + 1
4599         }
4600
4601         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4602                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4603         }
4604
4605         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4606                 self.context.cur_counterparty_commitment_transaction_number + 2
4607         }
4608
4609         #[cfg(test)]
4610         pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
4611                 &self.context.holder_signer
4612         }
4613
4614         #[cfg(test)]
4615         pub fn get_value_stat(&self) -> ChannelValueStat {
4616                 ChannelValueStat {
4617                         value_to_self_msat: self.context.value_to_self_msat,
4618                         channel_value_msat: self.context.channel_value_satoshis * 1000,
4619                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4620                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4621                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4622                         holding_cell_outbound_amount_msat: {
4623                                 let mut res = 0;
4624                                 for h in self.context.holding_cell_htlc_updates.iter() {
4625                                         match h {
4626                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4627                                                         res += amount_msat;
4628                                                 }
4629                                                 _ => {}
4630                                         }
4631                                 }
4632                                 res
4633                         },
4634                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4635                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4636                 }
4637         }
4638
4639         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4640         /// Allowed in any state (including after shutdown)
4641         pub fn is_awaiting_monitor_update(&self) -> bool {
4642                 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4643         }
4644
4645         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4646         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4647                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4648                 self.context.blocked_monitor_updates[0].update.update_id - 1
4649         }
4650
4651         /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4652         /// further blocked monitor update exists after the next.
4653         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4654                 if self.context.blocked_monitor_updates.is_empty() { return None; }
4655                 Some((self.context.blocked_monitor_updates.remove(0).update,
4656                         !self.context.blocked_monitor_updates.is_empty()))
4657         }
4658
4659         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4660         /// immediately given to the user for persisting or `None` if it should be held as blocked.
4661         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4662         -> Option<ChannelMonitorUpdate> {
4663                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4664                 if !release_monitor {
4665                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4666                                 update,
4667                         });
4668                         None
4669                 } else {
4670                         Some(update)
4671                 }
4672         }
4673
4674         pub fn blocked_monitor_updates_pending(&self) -> usize {
4675                 self.context.blocked_monitor_updates.len()
4676         }
4677
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		// Mask out the flags which may legitimately be set while waiting on the initial
		// persistence, then check whether we're otherwise exactly in the FundingSent state.
		if self.context.channel_state &
			!(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
				== ChannelState::FundingSent as u32 {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// FundingSent set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
4713
4714         /// Returns true if our channel_ready has been sent
4715         pub fn is_our_channel_ready(&self) -> bool {
4716                 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4717         }
4718
4719         /// Returns true if our peer has either initiated or agreed to shut down the channel.
4720         pub fn received_shutdown(&self) -> bool {
4721                 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4722         }
4723
4724         /// Returns true if we either initiated or agreed to shut down the channel.
4725         pub fn sent_shutdown(&self) -> bool {
4726                 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4727         }
4728
4729         /// Returns true if this channel is fully shut down. True here implies that no further actions
4730         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4731         /// will be handled appropriately by the chain monitor.
4732         pub fn is_shutdown(&self) -> bool {
4733                 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
4734                         assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4735                         true
4736                 } else { false }
4737         }
4738
4739         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4740                 self.context.channel_update_status
4741         }
4742
4743         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4744                 self.context.update_time_counter += 1;
4745                 self.context.channel_update_status = status;
4746         }
4747
	/// Checks whether our funding transaction has reached the required confirmation depth and, if
	/// so, transitions the channel state and returns a `channel_ready` message to send to our
	/// peer. Returns `None` when we're not ready yet, already moved past this stage, or when the
	/// message must be held back (monitor update in progress or peer disconnected).
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			// A reorg took us below the block in which we saw the funding transaction confirm -
			// forget the confirmation height so it can be re-detected later.
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
			// First time reaching depth: record that our channel_ready is (about to be) sent.
			self.context.channel_state |= ChannelState::OurChannelReady as u32;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
			// Our peer already sent their channel_ready - with ours the channel is fully ready.
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state);
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
				// If the peer is disconnected we don't send anything now; the message will be
				// regenerated on reconnect.
			} else {
				// Can't send while a monitor update is pending - record that we owe our peer a
				// channel_ready so it goes out when the monitor update completes.
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
4810
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							// The funding output checks out - record where it confirmed and derive
							// the short channel id from (height, tx index, output index).
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							}
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
						return Ok((Some(channel_ready), announcement_sigs));
					}
				}
				// Regardless of funding-confirmation state, any transaction spending the funding
				// output closes the channel.
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok((None, None))
	}
4890
4891         /// When a new block is connected, we check the height of the block against outbound holding
4892         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
4893         /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
4894         /// handled by the ChannelMonitor.
4895         ///
4896         /// If we return Err, the channel may have been closed, at which point the standard
4897         /// requirements apply - no calls may be made except those explicitly stated to be allowed
4898         /// post-shutdown.
4899         ///
4900         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
4901         /// back.
4902         pub fn best_block_updated<NS: Deref, L: Deref>(
4903                 &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
4904                 node_signer: &NS, user_config: &UserConfig, logger: &L
4905         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4906         where
4907                 NS::Target: NodeSigner,
4908                 L::Target: Logger
4909         {
4910                 self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
4911         }
4912
	/// Core best-block handler. Times out holding-cell HTLCs that would expire too soon, checks
	/// whether we can now send `channel_ready`, and detects funding un-confirmation/timeout
	/// (returning `Err` to force-close in those cases). When `genesis_node_signer` is `None`,
	/// announcement-signature generation is skipped.
	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
		// Drop expiring holding-cell Adds, collecting their (source, hash) so callers can fail
		// them backwards.
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
				self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}

		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
		   (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map  being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			// Inbound channel whose funding never confirmed within the deadline - give up on it.
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
			assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
			self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
4987
4988         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
4989         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
4990         /// before the channel has reached channel_ready and we can just wait for more blocks.
4991         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
4992                 if self.context.funding_tx_confirmation_height != 0 {
4993                         // We handle the funding disconnection by calling best_block_updated with a height one
4994                         // below where our funding was connected, implying a reorg back to conf_height - 1.
4995                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
4996                         // We use the time field to bump the current time we set on channel updates if its
4997                         // larger. If we don't know that time has moved forward, we can just set it to the last
4998                         // time we saw and it will be ignored.
4999                         let best_time = self.context.update_time_counter;
5000                         match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
5001                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5002                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5003                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5004                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5005                                         Ok(())
5006                                 },
5007                                 Err(e) => Err(e)
5008                         }
5009                 } else {
5010                         // We never learned about the funding confirmation anyway, just ignore
5011                         Ok(())
5012                 }
5013         }
5014
5015         // Methods to get unprompted messages to send to the remote end (or where we already returned
5016         // something in the handler for the message that prompted this message):
5017
5018         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5019         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5020         /// directions). Should be used for both broadcasted announcements and in response to an
5021         /// AnnouncementSignatures message from the remote peer.
5022         ///
5023         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5024         /// closing).
5025         ///
5026         /// This will only return ChannelError::Ignore upon failure.
5027         ///
5028         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5029         fn get_channel_announcement<NS: Deref>(
5030                 &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
5031         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5032                 if !self.context.config.announced_channel {
5033                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5034                 }
5035                 if !self.context.is_usable() {
5036                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5037                 }
5038
5039                 let short_channel_id = self.context.get_short_channel_id()
5040                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5041                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5042                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5043                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5044                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5045
5046                 let msg = msgs::UnsignedChannelAnnouncement {
5047                         features: channelmanager::provided_channel_features(&user_config),
5048                         chain_hash,
5049                         short_channel_id,
5050                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5051                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5052                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5053                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5054                         excess_data: Vec::new(),
5055                 };
5056
5057                 Ok(msg)
5058         }
5059
	/// Builds the `announcement_signatures` message for this channel, if we're ready to announce
	/// it and haven't already sent our signatures.
	///
	/// Returns `None` (with no side effects, aside from logging) unless the funding transaction
	/// has at least six confirmations, the channel is usable, our peer is connected, and
	/// `announcement_sigs_state` is still `NotSent`. On success, moves `announcement_sigs_state`
	/// to `MessageSent` so we don't produce the message a second time.
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Require six confirmations (the confirmation height itself counts as the first) before
		// announcing, and require that the funding is confirmed at all.
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		// Only ever send announcement_signatures once per channel.
		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				// Not announceable (yet) - e.g. private channel or missing short_channel_id.
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				// Only mark as sent once both signatures succeeded, so a signer failure above
				// leaves us free to retry on a later call.
				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			}
		}
	}
5125
5126         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5127         /// available.
5128         fn sign_channel_announcement<NS: Deref>(
5129                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5130         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5131                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5132                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5133                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5134                         let were_node_one = announcement.node_id_1 == our_node_key;
5135
5136                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5137                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5138                         match &self.context.holder_signer {
5139                                 ChannelSignerType::Ecdsa(ecdsa) => {
5140                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5141                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5142                                         Ok(msgs::ChannelAnnouncement {
5143                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5144                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5145                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5146                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5147                                                 contents: announcement,
5148                                         })
5149                                 }
5150                         }
5151                 } else {
5152                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5153                 }
5154         }
5155
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		// Rebuild the announcement locally so we verify the counterparty's signatures against
		// exactly what we would broadcast.
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		// Gossip signatures commit to the double-SHA256 of the serialized announcement.
		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		// Invalid signatures are a protocol violation - close the channel.
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				 &announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		// Store the (now-verified) signatures *before* the confirmation-depth check below: even
		// if we return Ignore here we keep them so the announcement can be built later.
		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
5186
5187         /// Gets a signed channel_announcement for this channel, if we previously received an
5188         /// announcement_signatures from our counterparty.
5189         pub fn get_signed_channel_announcement<NS: Deref>(
5190                 &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
5191         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5192                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5193                         return None;
5194                 }
5195                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5196                         Ok(res) => res,
5197                         Err(_) => return None,
5198                 };
5199                 match self.sign_channel_announcement(node_signer, announcement) {
5200                         Ok(res) => Some(res),
5201                         Err(_) => None,
5202                 }
5203         }
5204
	/// Builds the channel_reestablish message to send on reconnection.
	///
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		// We must currently be disconnected from our peer, and the channel must have advanced
		// past the very first counterparty commitment.
		assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		// If the counterparty has revoked at least one commitment (i.e. we've advanced at least
		// one full commitment beyond the initial one), include their last revoked
		// per-commitment secret for data_loss_protect; otherwise send all-zeroes.
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0;32]
		};
		// Track that we're now awaiting a response from our peer (see
		// `Self::mark_awaiting_response`).
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
5255
5256
5257         // Send stuff to our remote peers:
5258
5259         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5260         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5261         /// commitment update.
5262         ///
5263         /// `Err`s will only be [`ChannelError::Ignore`].
5264         pub fn queue_add_htlc<F: Deref, L: Deref>(
5265                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5266                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5267                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5268         ) -> Result<(), ChannelError>
5269         where F::Target: FeeEstimator, L::Target: Logger
5270         {
5271                 self
5272                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5273                                 skimmed_fee_msat, fee_estimator, logger)
5274                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5275                         .map_err(|err| {
5276                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5277                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5278                                 err
5279                         })
5280         }
5281
	/// Adds a pending outbound HTLC to this channel, note that you probably want
	/// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   awaiting ACK.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		// HTLCs may only be added while the channel is fully open and neither side has begun
		// shutting down.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		// Sanity-check the amount against the total channel value (converted to msat).
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		// Enforce the channel's current per-HTLC minimum and maximum.
		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen, if we're !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		// If we're awaiting an RAA or a monitor update completion we can't send updates on the
		// wire, so the HTLC must go into the holding cell instead.
		let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			// Queue the HTLC for later; no update_add_htlc message is generated now.
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
			});
			return Ok(None);
		}

		// Otherwise, track the HTLC as LocalAnnounced and build the update_add_htlc to send.
		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
5385
5386         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5387                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5388                 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5389                 // fail to generate this, we still are at least at a position where upgrading their status
5390                 // is acceptable.
5391                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5392                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5393                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5394                         } else { None };
5395                         if let Some(state) = new_state {
5396                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5397                                 htlc.state = state;
5398                         }
5399                 }
5400                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5401                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5402                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5403                                 // Grab the preimage, if it exists, instead of cloning
5404                                 let mut reason = OutboundHTLCOutcome::Success(None);
5405                                 mem::swap(outcome, &mut reason);
5406                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5407                         }
5408                 }
5409                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5410                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5411                                 debug_assert!(!self.context.is_outbound());
5412                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5413                                 self.context.feerate_per_kw = feerate;
5414                                 self.context.pending_update_fee = None;
5415                         }
5416                 }
5417                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5418
5419                 let (mut htlcs_ref, counterparty_commitment_tx) =
5420                         self.build_commitment_no_state_update(logger);
5421                 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5422                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5423                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5424
5425                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5426                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5427                 }
5428
5429                 self.context.latest_monitor_update_id += 1;
5430                 let monitor_update = ChannelMonitorUpdate {
5431                         update_id: self.context.latest_monitor_update_id,
5432                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5433                                 commitment_txid: counterparty_commitment_txid,
5434                                 htlc_outputs: htlcs.clone(),
5435                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5436                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5437                                 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5438                                 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5439                                 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5440                         }]
5441                 };
5442                 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5443                 monitor_update
5444         }
5445
	/// Builds the current counterparty commitment transaction without mutating any channel
	/// state, returning the included HTLCs (paired with their sources, where known) and the
	/// transaction itself.
	///
	/// Under `test`/`fuzzing` this also consumes the cached projected remote commitment fee
	/// (if any) and asserts it matches the fee of the transaction actually built.
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				// Take (and clear) the cached fee projection for the next remote commitment; if
				// the HTLC counts/ids and feerate it was computed against still match the
				// current channel state, the projected fee must equal the fee we just built.
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
						}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
5474
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	///
	/// Returns the `commitment_signed` message to send, plus the counterparty commitment txid
	/// and the HTLCs (with sources) included in the transaction that was signed.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					// Collect references to the included HTLCs so each can be zipped with its
					// signature for the per-HTLC trace logging below.
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					// Signer rejection is the only failure path out of this function.
					let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			}
		}
	}
5525
5526         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5527         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5528         ///
5529         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5530         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5531         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5532                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5533                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5534                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5535         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5536         where F::Target: FeeEstimator, L::Target: Logger
5537         {
5538                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5539                         onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
5540                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5541                 match send_res? {
5542                         Some(_) => {
5543                                 let monitor_update = self.build_commitment_no_status_check(logger);
5544                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5545                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
5546                         },
5547                         None => Ok(None)
5548                 }
5549         }
5550
5551         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5552         /// happened.
5553         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5554                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5555                         fee_base_msat: msg.contents.fee_base_msat,
5556                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5557                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
5558                 });
5559                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5560                 if did_change {
5561                         self.context.counterparty_forwarding_info = new_forwarding_info;
5562                 }
5563
5564                 Ok(did_change)
5565         }
5566
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
	///
	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
	/// [`ChannelMonitorUpdate`] will be returned).
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		// We can't begin shutdown while an outbound HTLC is still awaiting the counterparty's
		// initial commitment signature for it.
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		// Reject double-starts: either we already sent `shutdown`, or the remote did.
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
			if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
				return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
			}
			else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
			}
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
		// script is set, we just force-close and call it a day.
		let mut chan_closed = false;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			chan_closed = true;
		}

		// Decide which shutdown script to commit to (and whether committing one requires a
		// monitor update): keep an existing one, else prefer the caller's override, else ask
		// the signer. A pre-funding close needs no script at all.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None if !chan_closed => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
			None => false,
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		// Pre-funding channels jump straight to fully-shut-down; funded ones just record that
		// our `shutdown` has been sent.
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			self.context.channel_state = ChannelState::ShutdownComplete as u32;
		} else {
			self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		}
		self.context.update_time_counter += 1;

		// If we just committed to a new shutdown script, persist it to the monitor.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
5671
5672         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5673                 self.context.holding_cell_htlc_updates.iter()
5674                         .flat_map(|htlc_update| {
5675                                 match htlc_update {
5676                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5677                                                 => Some((source, payment_hash)),
5678                                         _ => None,
5679                                 }
5680                         })
5681                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5682         }
5683 }
5684
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	/// The channel state shared with funded channels.
	pub context: ChannelContext<SP>,
	/// State tracked only while the channel remains unfunded.
	pub unfunded_context: UnfundedChannelContext,
}
5690
5691 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5692         pub fn new<ES: Deref, F: Deref>(
5693                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5694                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5695                 outbound_scid_alias: u64
5696         ) -> Result<OutboundV1Channel<SP>, APIError>
5697         where ES::Target: EntropySource,
5698               F::Target: FeeEstimator
5699         {
5700                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5701                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5702                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5703                 let pubkeys = holder_signer.pubkeys().clone();
5704
5705                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5706                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5707                 }
5708                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5709                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5710                 }
5711                 let channel_value_msat = channel_value_satoshis * 1000;
5712                 if push_msat > channel_value_msat {
5713                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5714                 }
5715                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5716                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5717                 }
5718                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5719                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5720                         // Protocol level safety check in place, although it should never happen because
5721                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5722                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5723                 }
5724
5725                 let channel_type = Self::get_initial_channel_type(&config, their_features);
5726                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5727
5728                 let commitment_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5729                         ConfirmationTarget::MempoolMinimum
5730                 } else {
5731                         ConfirmationTarget::Normal
5732                 };
5733                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5734
5735                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5736                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5737                 if value_to_self_msat < commitment_tx_fee {
5738                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5739                 }
5740
5741                 let mut secp_ctx = Secp256k1::new();
5742                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5743
5744                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5745                         match signer_provider.get_shutdown_scriptpubkey() {
5746                                 Ok(scriptpubkey) => Some(scriptpubkey),
5747                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5748                         }
5749                 } else { None };
5750
5751                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5752                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
5753                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5754                         }
5755                 }
5756
5757                 let destination_script = match signer_provider.get_destination_script() {
5758                         Ok(script) => script,
5759                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5760                 };
5761
5762                 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
5763
5764                 Ok(Self {
5765                         context: ChannelContext {
5766                                 user_id,
5767
5768                                 config: LegacyChannelConfig {
5769                                         options: config.channel_config.clone(),
5770                                         announced_channel: config.channel_handshake_config.announced_channel,
5771                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5772                                 },
5773
5774                                 prev_config: None,
5775
5776                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5777
5778                                 channel_id: temporary_channel_id,
5779                                 temporary_channel_id: Some(temporary_channel_id),
5780                                 channel_state: ChannelState::OurInitSent as u32,
5781                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
5782                                 secp_ctx,
5783                                 channel_value_satoshis,
5784
5785                                 latest_monitor_update_id: 0,
5786
5787                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5788                                 shutdown_scriptpubkey,
5789                                 destination_script,
5790
5791                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5792                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5793                                 value_to_self_msat,
5794
5795                                 pending_inbound_htlcs: Vec::new(),
5796                                 pending_outbound_htlcs: Vec::new(),
5797                                 holding_cell_htlc_updates: Vec::new(),
5798                                 pending_update_fee: None,
5799                                 holding_cell_update_fee: None,
5800                                 next_holder_htlc_id: 0,
5801                                 next_counterparty_htlc_id: 0,
5802                                 update_time_counter: 1,
5803
5804                                 resend_order: RAACommitmentOrder::CommitmentFirst,
5805
5806                                 monitor_pending_channel_ready: false,
5807                                 monitor_pending_revoke_and_ack: false,
5808                                 monitor_pending_commitment_signed: false,
5809                                 monitor_pending_forwards: Vec::new(),
5810                                 monitor_pending_failures: Vec::new(),
5811                                 monitor_pending_finalized_fulfills: Vec::new(),
5812
5813                                 #[cfg(debug_assertions)]
5814                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5815                                 #[cfg(debug_assertions)]
5816                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5817
5818                                 last_sent_closing_fee: None,
5819                                 pending_counterparty_closing_signed: None,
5820                                 closing_fee_limits: None,
5821                                 target_closing_feerate_sats_per_kw: None,
5822
5823                                 funding_tx_confirmed_in: None,
5824                                 funding_tx_confirmation_height: 0,
5825                                 short_channel_id: None,
5826                                 channel_creation_height: current_chain_height,
5827
5828                                 feerate_per_kw: commitment_feerate,
5829                                 counterparty_dust_limit_satoshis: 0,
5830                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
5831                                 counterparty_max_htlc_value_in_flight_msat: 0,
5832                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
5833                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
5834                                 holder_selected_channel_reserve_satoshis,
5835                                 counterparty_htlc_minimum_msat: 0,
5836                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
5837                                 counterparty_max_accepted_htlcs: 0,
5838                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
5839                                 minimum_depth: None, // Filled in in accept_channel
5840
5841                                 counterparty_forwarding_info: None,
5842
5843                                 channel_transaction_parameters: ChannelTransactionParameters {
5844                                         holder_pubkeys: pubkeys,
5845                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
5846                                         is_outbound_from_holder: true,
5847                                         counterparty_parameters: None,
5848                                         funding_outpoint: None,
5849                                         channel_type_features: channel_type.clone()
5850                                 },
5851                                 funding_transaction: None,
5852                                 is_batch_funding: None,
5853
5854                                 counterparty_cur_commitment_point: None,
5855                                 counterparty_prev_commitment_point: None,
5856                                 counterparty_node_id,
5857
5858                                 counterparty_shutdown_scriptpubkey: None,
5859
5860                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
5861
5862                                 channel_update_status: ChannelUpdateStatus::Enabled,
5863                                 closing_signed_in_flight: false,
5864
5865                                 announcement_sigs: None,
5866
5867                                 #[cfg(any(test, fuzzing))]
5868                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
5869                                 #[cfg(any(test, fuzzing))]
5870                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
5871
5872                                 workaround_lnd_bug_4006: None,
5873                                 sent_message_awaiting_response: None,
5874
5875                                 latest_inbound_scid_alias: None,
5876                                 outbound_scid_alias,
5877
5878                                 channel_pending_event_emitted: false,
5879                                 channel_ready_event_emitted: false,
5880
5881                                 #[cfg(any(test, fuzzing))]
5882                                 historical_inbound_htlc_fulfills: HashSet::new(),
5883
5884                                 channel_type,
5885                                 channel_keys_id,
5886
5887                                 blocked_monitor_updates: Vec::new(),
5888                         },
5889                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
5890                 })
5891         }
5892
	/// Signs the counterparty's initial commitment transaction for inclusion in our
	/// `funding_created` message.
	///
	/// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
	fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
		// Build the counterparty's first commitment transaction; it spends the (just-set)
		// funding outpoint, so the channel parameters must already be populated by the caller.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		match &self.context.holder_signer {
			// TODO (taproot|arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				// The initial commitment carries no HTLCs, hence the empty HTLC-signature Vec.
				// We only need the commitment signature (`.0`) here.
				Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
			}
		}
	}
5905
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
	pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		// The funding outpoint must be set before signing, as the commitment transaction we sign
		// spends it. It is rolled back below if signing fails so this call can be retried.
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let signature = match self.get_funding_created_signature(logger) {
			Ok(res) => res,
			Err(e) => {
				log_error!(logger, "Got bad signatures: {:?}!", e);
				// Undo the outpoint assignment so the channel is left in its pre-call state.
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, e));
			}
		};

		// Remember the pre-funding id for the message; `channel_id` is rewritten below.
		let temporary_channel_id = self.context.channel_id;

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingCreated as u32;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
			self.context.minimum_depth.unwrap_or(0) > 0 &&
			self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		// `Some(())` iff this channel is part of a batch funding transaction.
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		// Consume self: the channel graduates from OutboundV1Channel to a funded Channel.
		let channel = Channel {
			context: self.context,
		};

		Ok((channel, msgs::FundingCreated {
			temporary_channel_id,
			funding_txid: funding_txo.txid,
			funding_output_index: funding_txo.index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		}))
	}
5972
5973         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
5974                 // The default channel type (ie the first one we try) depends on whether the channel is
5975                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
5976                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
5977                 // with no other changes, and fall back to `only_static_remotekey`.
5978                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
5979                 if !config.channel_handshake_config.announced_channel &&
5980                         config.channel_handshake_config.negotiate_scid_privacy &&
5981                         their_features.supports_scid_privacy() {
5982                         ret.set_scid_privacy_required();
5983                 }
5984
5985                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
5986                 // set it now. If they don't understand it, we'll fall back to our default of
5987                 // `only_static_remotekey`.
5988                 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
5989                         their_features.supports_anchors_zero_fee_htlc_tx() {
5990                         ret.set_anchors_zero_fee_htlc_tx_required();
5991                 }
5992
5993                 ret
5994         }
5995
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		// Retrying only makes sense on an outbound channel that is still awaiting accept_channel.
		if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted one.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// whatever reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			// Anchors channels use a different (lower) feerate; re-fetch the normal-confirmation
			// feerate now that we're proposing a non-anchors type.
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		// Keep the transaction-parameters copy of the channel type in sync with the downgrade.
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
6031
6032         pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
6033                 if !self.context.is_outbound() {
6034                         panic!("Tried to open a channel for an inbound channel?");
6035                 }
6036                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6037                         panic!("Cannot generate an open_channel after we've moved forward");
6038                 }
6039
6040                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6041                         panic!("Tried to send an open_channel for a channel that has already advanced");
6042                 }
6043
6044                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6045                 let keys = self.context.get_holder_pubkeys();
6046
6047                 msgs::OpenChannel {
6048                         chain_hash,
6049                         temporary_channel_id: self.context.channel_id,
6050                         funding_satoshis: self.context.channel_value_satoshis,
6051                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6052                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6053                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6054                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6055                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6056                         feerate_per_kw: self.context.feerate_per_kw as u32,
6057                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6058                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6059                         funding_pubkey: keys.funding_pubkey,
6060                         revocation_basepoint: keys.revocation_basepoint,
6061                         payment_point: keys.payment_point,
6062                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6063                         htlc_basepoint: keys.htlc_basepoint,
6064                         first_per_commitment_point,
6065                         channel_flags: if self.context.config.announced_channel {1} else {0},
6066                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6067                                 Some(script) => script.clone().into_inner(),
6068                                 None => Builder::new().into_script(),
6069                         }),
6070                         channel_type: Some(self.context.channel_type.clone()),
6071                 }
6072         }
6073
	// Message handlers

	/// Handles the counterparty's `accept_channel` reply to our `open_channel`, validating its
	/// parameters against our (possibly overridden) handshake limits. On success, stores the
	/// counterparty's channel parameters and advances the handshake state; any returned error is
	/// a `ChannelError::Close`.
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		// A per-channel limits override (if set at open time) takes precedence over the defaults.
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if self.context.channel_state != ChannelState::OurInitSent as u32 {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		// 21M BTC in satoshis: dust limit cannot exceed total bitcoin supply.
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		// An htlc_minimum at or above the spendable channel value would make the channel useless.
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		// The counterparty must echo our proposed channel type (or, lacking channel-type
		// negotiation support, implicitly accept only our baseline type).
		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		// All checks passed: persist the counterparty's negotiated parameters.
		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		// With trust_own_funding_0conf we accept a 0 minimum depth (0-conf); otherwise require
		// at least one confirmation.
		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: msg.revocation_basepoint,
			payment_point: msg.payment_point,
			delayed_payment_basepoint: msg.delayed_payment_basepoint,
			htlc_basepoint: msg.htlc_basepoint
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
6204 }
6205
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	/// Channel state shared with funded channels; initialized from the counterparty's
	/// `open_channel` message.
	pub context: ChannelContext<SP>,
	/// State tracked only while the channel is unfunded (e.g. its age in timer ticks).
	pub unfunded_context: UnfundedChannelContext,
}
6211
6212 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6213         /// Creates a new channel from a remote sides' request for one.
6214         /// Assumes chain_hash has already been checked and corresponds with what we expect!
6215         pub fn new<ES: Deref, F: Deref, L: Deref>(
6216                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6217                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6218                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6219                 current_chain_height: u32, logger: &L, is_0conf: bool,
6220         ) -> Result<InboundV1Channel<SP>, ChannelError>
6221                 where ES::Target: EntropySource,
6222                           F::Target: FeeEstimator,
6223                           L::Target: Logger,
6224         {
6225                 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6226
6227                 // First check the channel type is known, failing before we do anything else if we don't
6228                 // support this channel type.
6229                 let channel_type = if let Some(channel_type) = &msg.channel_type {
6230                         if channel_type.supports_any_optional_bits() {
6231                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6232                         }
6233
6234                         // We only support the channel types defined by the `ChannelManager` in
6235                         // `provided_channel_type_features`. The channel type must always support
6236                         // `static_remote_key`.
6237                         if !channel_type.requires_static_remote_key() {
6238                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6239                         }
6240                         // Make sure we support all of the features behind the channel type.
6241                         if !channel_type.is_subset(our_supported_features) {
6242                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6243                         }
6244                         if channel_type.requires_scid_privacy() && announced_channel {
6245                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6246                         }
6247                         channel_type.clone()
6248                 } else {
6249                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6250                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6251                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6252                         }
6253                         channel_type
6254                 };
6255
6256                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6257                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6258                 let pubkeys = holder_signer.pubkeys().clone();
6259                 let counterparty_pubkeys = ChannelPublicKeys {
6260                         funding_pubkey: msg.funding_pubkey,
6261                         revocation_basepoint: msg.revocation_basepoint,
6262                         payment_point: msg.payment_point,
6263                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
6264                         htlc_basepoint: msg.htlc_basepoint
6265                 };
6266
6267                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6268                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6269                 }
6270
6271                 // Check sanity of message fields:
6272                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6273                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6274                 }
6275                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6276                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6277                 }
6278                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6279                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6280                 }
6281                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6282                 if msg.push_msat > full_channel_value_msat {
6283                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6284                 }
6285                 if msg.dust_limit_satoshis > msg.funding_satoshis {
6286                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6287                 }
6288                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6289                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6290                 }
6291                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6292
6293                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6294                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6295                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6296                 }
6297                 if msg.max_accepted_htlcs < 1 {
6298                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6299                 }
6300                 if msg.max_accepted_htlcs > MAX_HTLCS {
6301                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6302                 }
6303
6304                 // Now check against optional parameters as set by config...
6305                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6306                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6307                 }
6308                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6309                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
6310                 }
6311                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6312                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6313                 }
6314                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6315                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6316                 }
6317                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6318                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6319                 }
6320                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6321                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6322                 }
6323                 if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
6324                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6325                 }
6326
6327                 // Convert things into internal flags and prep our state:
6328
6329                 if config.channel_handshake_limits.force_announced_channel_preference {
6330                         if config.channel_handshake_config.announced_channel != announced_channel {
6331                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6332                         }
6333                 }
6334
6335                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6336                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6337                         // Protocol level safety check in place, although it should never happen because
6338                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6339                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6340                 }
6341                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6342                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6343                 }
6344                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6345                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6346                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6347                 }
6348                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6349                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6350                 }
6351
6352                 // check if the funder's amount for the initial commitment tx is sufficient
6353                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6354                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6355                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6356                 if funders_amount_msat / 1000 < commitment_tx_fee {
6357                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
6358                 }
6359
6360                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
6361                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6362                 // want to push much to us), our counterparty should always have more than our reserve.
6363                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6364                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6365                 }
6366
6367                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6368                         match &msg.shutdown_scriptpubkey {
6369                                 &Some(ref script) => {
6370                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6371                                         if script.len() == 0 {
6372                                                 None
6373                                         } else {
6374                                                 if !script::is_bolt2_compliant(&script, their_features) {
6375                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6376                                                 }
6377                                                 Some(script.clone())
6378                                         }
6379                                 },
6380                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6381                                 &None => {
6382                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6383                                 }
6384                         }
6385                 } else { None };
6386
6387                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6388                         match signer_provider.get_shutdown_scriptpubkey() {
6389                                 Ok(scriptpubkey) => Some(scriptpubkey),
6390                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6391                         }
6392                 } else { None };
6393
6394                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6395                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6396                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6397                         }
6398                 }
6399
6400                 let destination_script = match signer_provider.get_destination_script() {
6401                         Ok(script) => script,
6402                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6403                 };
6404
6405                 let mut secp_ctx = Secp256k1::new();
6406                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6407
6408                 let minimum_depth = if is_0conf {
6409                         Some(0)
6410                 } else {
6411                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6412                 };
6413
6414                 let chan = Self {
6415                         context: ChannelContext {
6416                                 user_id,
6417
6418                                 config: LegacyChannelConfig {
6419                                         options: config.channel_config.clone(),
6420                                         announced_channel,
6421                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6422                                 },
6423
6424                                 prev_config: None,
6425
6426                                 inbound_handshake_limits_override: None,
6427
6428                                 temporary_channel_id: Some(msg.temporary_channel_id),
6429                                 channel_id: msg.temporary_channel_id,
6430                                 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6431                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6432                                 secp_ctx,
6433
6434                                 latest_monitor_update_id: 0,
6435
6436                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6437                                 shutdown_scriptpubkey,
6438                                 destination_script,
6439
6440                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6441                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6442                                 value_to_self_msat: msg.push_msat,
6443
6444                                 pending_inbound_htlcs: Vec::new(),
6445                                 pending_outbound_htlcs: Vec::new(),
6446                                 holding_cell_htlc_updates: Vec::new(),
6447                                 pending_update_fee: None,
6448                                 holding_cell_update_fee: None,
6449                                 next_holder_htlc_id: 0,
6450                                 next_counterparty_htlc_id: 0,
6451                                 update_time_counter: 1,
6452
6453                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6454
6455                                 monitor_pending_channel_ready: false,
6456                                 monitor_pending_revoke_and_ack: false,
6457                                 monitor_pending_commitment_signed: false,
6458                                 monitor_pending_forwards: Vec::new(),
6459                                 monitor_pending_failures: Vec::new(),
6460                                 monitor_pending_finalized_fulfills: Vec::new(),
6461
6462                                 #[cfg(debug_assertions)]
6463                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6464                                 #[cfg(debug_assertions)]
6465                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6466
6467                                 last_sent_closing_fee: None,
6468                                 pending_counterparty_closing_signed: None,
6469                                 closing_fee_limits: None,
6470                                 target_closing_feerate_sats_per_kw: None,
6471
6472                                 funding_tx_confirmed_in: None,
6473                                 funding_tx_confirmation_height: 0,
6474                                 short_channel_id: None,
6475                                 channel_creation_height: current_chain_height,
6476
6477                                 feerate_per_kw: msg.feerate_per_kw,
6478                                 channel_value_satoshis: msg.funding_satoshis,
6479                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6480                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6481                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6482                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6483                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6484                                 holder_selected_channel_reserve_satoshis,
6485                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6486                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6487                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6488                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6489                                 minimum_depth,
6490
6491                                 counterparty_forwarding_info: None,
6492
6493                                 channel_transaction_parameters: ChannelTransactionParameters {
6494                                         holder_pubkeys: pubkeys,
6495                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6496                                         is_outbound_from_holder: false,
6497                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6498                                                 selected_contest_delay: msg.to_self_delay,
6499                                                 pubkeys: counterparty_pubkeys,
6500                                         }),
6501                                         funding_outpoint: None,
6502                                         channel_type_features: channel_type.clone()
6503                                 },
6504                                 funding_transaction: None,
6505                                 is_batch_funding: None,
6506
6507                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6508                                 counterparty_prev_commitment_point: None,
6509                                 counterparty_node_id,
6510
6511                                 counterparty_shutdown_scriptpubkey,
6512
6513                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6514
6515                                 channel_update_status: ChannelUpdateStatus::Enabled,
6516                                 closing_signed_in_flight: false,
6517
6518                                 announcement_sigs: None,
6519
6520                                 #[cfg(any(test, fuzzing))]
6521                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6522                                 #[cfg(any(test, fuzzing))]
6523                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6524
6525                                 workaround_lnd_bug_4006: None,
6526                                 sent_message_awaiting_response: None,
6527
6528                                 latest_inbound_scid_alias: None,
6529                                 outbound_scid_alias: 0,
6530
6531                                 channel_pending_event_emitted: false,
6532                                 channel_ready_event_emitted: false,
6533
6534                                 #[cfg(any(test, fuzzing))]
6535                                 historical_inbound_htlc_fulfills: HashSet::new(),
6536
6537                                 channel_type,
6538                                 channel_keys_id,
6539
6540                                 blocked_monitor_updates: Vec::new(),
6541                         },
6542                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6543                 };
6544
6545                 Ok(chan)
6546         }
6547
6548         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6549         /// should be sent back to the counterparty node.
6550         ///
6551         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6552         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6553                 if self.context.is_outbound() {
6554                         panic!("Tried to send accept_channel for an outbound channel?");
6555                 }
6556                 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6557                         panic!("Tried to send accept_channel after channel had moved forward");
6558                 }
6559                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6560                         panic!("Tried to send an accept_channel for a channel that has already advanced");
6561                 }
6562
6563                 self.generate_accept_channel_message()
6564         }
6565
6566         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6567         /// inbound channel. If the intention is to accept an inbound channel, use
6568         /// [`InboundV1Channel::accept_inbound_channel`] instead.
6569         ///
6570         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6571         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6572                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6573                 let keys = self.context.get_holder_pubkeys();
6574
6575                 msgs::AcceptChannel {
6576                         temporary_channel_id: self.context.channel_id,
6577                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6578                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6579                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6580                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6581                         minimum_depth: self.context.minimum_depth.unwrap(),
6582                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6583                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6584                         funding_pubkey: keys.funding_pubkey,
6585                         revocation_basepoint: keys.revocation_basepoint,
6586                         payment_point: keys.payment_point,
6587                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6588                         htlc_basepoint: keys.htlc_basepoint,
6589                         first_per_commitment_point,
6590                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6591                                 Some(script) => script.clone().into_inner(),
6592                                 None => Builder::new().into_script(),
6593                         }),
6594                         channel_type: Some(self.context.channel_type.clone()),
6595                         #[cfg(taproot)]
6596                         next_local_nonce: None,
6597                 }
6598         }
6599
	/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
	/// inbound channel without accepting it.
	///
	/// Unlike [`InboundV1Channel::accept_inbound_channel`], this performs no state checks and
	/// leaves the channel untouched.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	#[cfg(test)]
	pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
		self.generate_accept_channel_message()
	}
6608
	/// Verifies the counterparty's signature (from their `funding_created` message) over our
	/// initial holder commitment transaction, then produces our signature over their initial
	/// commitment transaction.
	///
	/// Returns `(counterparty_initial_commitment_tx, holder_initial_commitment_tx,
	/// our_signature_on_counterparty_tx)`, or a `ChannelError::Close` if the peer's signature
	/// is invalid or our signer refuses to sign.
	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		// Build our own initial commitment transaction so we can check the peer's signature
		// against its sighash.
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign the holder commitment transaction...
			log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
				log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
				encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
				encode::serialize_hex(&funding_script), &self.context.channel_id());
			// Fails the channel (via secp_check!) if the peer's signature doesn't verify.
			secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
		}

		// Now build and sign the counterparty's initial commitment transaction in return.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;

		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		match &self.context.holder_signer {
			// TODO (arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;

				// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
				Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
			}
		}
	}
6645
	/// Handles a `funding_created` message from the counterparty: validates the message and
	/// its commitment signature, creates the initial [`ChannelMonitor`], and promotes this
	/// inbound channel into a full [`Channel`].
	///
	/// On success, returns the funded `Channel`, the `funding_signed` message to send back,
	/// and the fresh `ChannelMonitor` that must be persisted/watched. On failure, returns
	/// `self` back to the caller alongside the error so the channel can be cleaned up.
	pub fn funding_created<L: Deref>(
		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
	where
		L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
			// remember the channel, so it's safe to just send an error_message here and drop the
			// channel.
			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
		}
		// Internal invariant: no commitment numbers may have advanced yet for a channel that
		// hasn't been funded - a violation here is a local bug, not a peer error.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		// This is an externally observable change before we finish all our checks.  In particular
		// funding_created_signature may fail.
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
			Ok(res) => res,
			Err(ChannelError::Close(e)) => {
				// Roll back the funding outpoint we optimistically set above.
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, ChannelError::Close(e)));
			},
			Err(e) => {
				// The only error we know how to handle is ChannelError::Close, so we fall over here
				// to make sure we don't continue with an inconsistent state.
				panic!("unexpected error type from funding_created_signature {:?}", e);
			}
		};

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give the signer a chance to reject the commitment (e.g. external/remote signers).
		if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		// Now that we're past error-generating stuff, update our local state:

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		// The monitor gets its own signer instance, re-derived from the same keys id.
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		                                          shutdown_script, self.context.get_holder_selected_contest_delay(),
		                                          &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
		                                          &self.context.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);

		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		// Transition state: the channel id becomes the funding outpoint's id and both
		// commitment numbers advance past the initial commitment.
		self.context.channel_state = ChannelState::FundingSent as u32;
		self.context.channel_id = funding_txo.to_channel_id();
		self.context.cur_counterparty_commitment_transaction_number -= 1;
		self.context.cur_holder_commitment_transaction_number -= 1;

		log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());

		// Promote the channel to a full-fledged one now that we have updated the state and have a
		// `ChannelMonitor`.
		let mut channel = Channel {
			context: self.context,
		};
		let channel_id = channel.context.channel_id.clone();
		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());

		Ok((channel, msgs::FundingSigned {
			channel_id,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
		}, channel_monitor))
	}
6744 }
6745
6746 const SERIALIZATION_VERSION: u8 = 3;
6747 const MIN_SERIALIZATION_VERSION: u8 = 2;
6748
6749 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6750         (0, FailRelay),
6751         (1, FailMalformed),
6752         (2, Fulfill),
6753 );
6754
6755 impl Writeable for ChannelUpdateStatus {
6756         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6757                 // We only care about writing out the current state as it was announced, ie only either
6758                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6759                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6760                 match self {
6761                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6762                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6763                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6764                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6765                 }
6766                 Ok(())
6767         }
6768 }
6769
6770 impl Readable for ChannelUpdateStatus {
6771         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6772                 Ok(match <u8 as Readable>::read(reader)? {
6773                         0 => ChannelUpdateStatus::Enabled,
6774                         1 => ChannelUpdateStatus::Disabled,
6775                         _ => return Err(DecodeError::InvalidValue),
6776                 })
6777         }
6778 }
6779
6780 impl Writeable for AnnouncementSigsState {
6781         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6782                 // We only care about writing out the current state as if we had just disconnected, at
6783                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6784                 match self {
6785                         AnnouncementSigsState::NotSent => 0u8.write(writer),
6786                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
6787                         AnnouncementSigsState::Committed => 0u8.write(writer),
6788                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6789                 }
6790         }
6791 }
6792
6793 impl Readable for AnnouncementSigsState {
6794         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6795                 Ok(match <u8 as Readable>::read(reader)? {
6796                         0 => AnnouncementSigsState::NotSent,
6797                         1 => AnnouncementSigsState::PeerReceived,
6798                         _ => return Err(DecodeError::InvalidValue),
6799                 })
6800         }
6801 }
6802
6803 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
6804         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6805                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6806                 // called.
6807
6808                 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6809
6810                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6811                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6812                 // the low bytes now and the optional high bytes later.
6813                 let user_id_low = self.context.user_id as u64;
6814                 user_id_low.write(writer)?;
6815
6816                 // Version 1 deserializers expected to read parts of the config object here. Version 2
6817                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6818                 // `minimum_depth` we simply write dummy values here.
6819                 writer.write_all(&[0; 8])?;
6820
6821                 self.context.channel_id.write(writer)?;
6822                 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6823                 self.context.channel_value_satoshis.write(writer)?;
6824
6825                 self.context.latest_monitor_update_id.write(writer)?;
6826
6827                 let mut key_data = VecWriter(Vec::new());
6828                 // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
6829                 self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
6830                 assert!(key_data.0.len() < core::usize::MAX);
6831                 assert!(key_data.0.len() < core::u32::MAX as usize);
6832                 (key_data.0.len() as u32).write(writer)?;
6833                 writer.write_all(&key_data.0[..])?;
6834
6835                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6836                 // deserialized from that format.
6837                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6838                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6839                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6840                 }
6841                 self.context.destination_script.write(writer)?;
6842
6843                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
6844                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
6845                 self.context.value_to_self_msat.write(writer)?;
6846
6847                 let mut dropped_inbound_htlcs = 0;
6848                 for htlc in self.context.pending_inbound_htlcs.iter() {
6849                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
6850                                 dropped_inbound_htlcs += 1;
6851                         }
6852                 }
6853                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
6854                 for htlc in self.context.pending_inbound_htlcs.iter() {
6855                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
6856                                 continue; // Drop
6857                         }
6858                         htlc.htlc_id.write(writer)?;
6859                         htlc.amount_msat.write(writer)?;
6860                         htlc.cltv_expiry.write(writer)?;
6861                         htlc.payment_hash.write(writer)?;
6862                         match &htlc.state {
6863                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
6864                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
6865                                         1u8.write(writer)?;
6866                                         htlc_state.write(writer)?;
6867                                 },
6868                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
6869                                         2u8.write(writer)?;
6870                                         htlc_state.write(writer)?;
6871                                 },
6872                                 &InboundHTLCState::Committed => {
6873                                         3u8.write(writer)?;
6874                                 },
6875                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
6876                                         4u8.write(writer)?;
6877                                         removal_reason.write(writer)?;
6878                                 },
6879                         }
6880                 }
6881
6882                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
6883                 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
6884
6885                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
6886                 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
6887                         htlc.htlc_id.write(writer)?;
6888                         htlc.amount_msat.write(writer)?;
6889                         htlc.cltv_expiry.write(writer)?;
6890                         htlc.payment_hash.write(writer)?;
6891                         htlc.source.write(writer)?;
6892                         match &htlc.state {
6893                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
6894                                         0u8.write(writer)?;
6895                                         onion_packet.write(writer)?;
6896                                 },
6897                                 &OutboundHTLCState::Committed => {
6898                                         1u8.write(writer)?;
6899                                 },
6900                                 &OutboundHTLCState::RemoteRemoved(_) => {
6901                                         // Treat this as a Committed because we haven't received the CS - they'll
6902                                         // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
6903                                         1u8.write(writer)?;
6904                                 },
6905                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
6906                                         3u8.write(writer)?;
6907                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
6908                                                 preimages.push(preimage);
6909                                         }
6910                                         let reason: Option<&HTLCFailReason> = outcome.into();
6911                                         reason.write(writer)?;
6912                                 }
6913                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
6914                                         4u8.write(writer)?;
6915                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
6916                                                 preimages.push(preimage);
6917                                         }
6918                                         let reason: Option<&HTLCFailReason> = outcome.into();
6919                                         reason.write(writer)?;
6920                                 }
6921                         }
6922                         if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
6923                                 if pending_outbound_skimmed_fees.is_empty() {
6924                                         for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
6925                                 }
6926                                 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
6927                         } else if !pending_outbound_skimmed_fees.is_empty() {
6928                                 pending_outbound_skimmed_fees.push(None);
6929                         }
6930                 }
6931
6932                 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
6933                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
6934                 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
6935                         match update {
6936                                 &HTLCUpdateAwaitingACK::AddHTLC {
6937                                         ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
6938                                         skimmed_fee_msat,
6939                                 } => {
6940                                         0u8.write(writer)?;
6941                                         amount_msat.write(writer)?;
6942                                         cltv_expiry.write(writer)?;
6943                                         payment_hash.write(writer)?;
6944                                         source.write(writer)?;
6945                                         onion_routing_packet.write(writer)?;
6946
6947                                         if let Some(skimmed_fee) = skimmed_fee_msat {
6948                                                 if holding_cell_skimmed_fees.is_empty() {
6949                                                         for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
6950                                                 }
6951                                                 holding_cell_skimmed_fees.push(Some(skimmed_fee));
6952                                         } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
6953                                 },
6954                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
6955                                         1u8.write(writer)?;
6956                                         payment_preimage.write(writer)?;
6957                                         htlc_id.write(writer)?;
6958                                 },
6959                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
6960                                         2u8.write(writer)?;
6961                                         htlc_id.write(writer)?;
6962                                         err_packet.write(writer)?;
6963                                 }
6964                         }
6965                 }
6966
6967                 match self.context.resend_order {
6968                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
6969                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
6970                 }
6971
6972                 self.context.monitor_pending_channel_ready.write(writer)?;
6973                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
6974                 self.context.monitor_pending_commitment_signed.write(writer)?;
6975
6976                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
6977                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
6978                         pending_forward.write(writer)?;
6979                         htlc_id.write(writer)?;
6980                 }
6981
6982                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
6983                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
6984                         htlc_source.write(writer)?;
6985                         payment_hash.write(writer)?;
6986                         fail_reason.write(writer)?;
6987                 }
6988
6989                 if self.context.is_outbound() {
6990                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
6991                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
6992                         Some(feerate).write(writer)?;
6993                 } else {
6994                         // As for inbound HTLCs, if the update was only announced and never committed in a
6995                         // commitment_signed, drop it.
6996                         None::<u32>.write(writer)?;
6997                 }
6998                 self.context.holding_cell_update_fee.write(writer)?;
6999
7000                 self.context.next_holder_htlc_id.write(writer)?;
7001                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7002                 self.context.update_time_counter.write(writer)?;
7003                 self.context.feerate_per_kw.write(writer)?;
7004
7005                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7006                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7007                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7008                 // consider the stale state on reload.
7009                 0u8.write(writer)?;
7010
7011                 self.context.funding_tx_confirmed_in.write(writer)?;
7012                 self.context.funding_tx_confirmation_height.write(writer)?;
7013                 self.context.short_channel_id.write(writer)?;
7014
7015                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7016                 self.context.holder_dust_limit_satoshis.write(writer)?;
7017                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7018
7019                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7020                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7021
7022                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7023                 self.context.holder_htlc_minimum_msat.write(writer)?;
7024                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7025
7026                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7027                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7028
7029                 match &self.context.counterparty_forwarding_info {
7030                         Some(info) => {
7031                                 1u8.write(writer)?;
7032                                 info.fee_base_msat.write(writer)?;
7033                                 info.fee_proportional_millionths.write(writer)?;
7034                                 info.cltv_expiry_delta.write(writer)?;
7035                         },
7036                         None => 0u8.write(writer)?
7037                 }
7038
7039                 self.context.channel_transaction_parameters.write(writer)?;
7040                 self.context.funding_transaction.write(writer)?;
7041
7042                 self.context.counterparty_cur_commitment_point.write(writer)?;
7043                 self.context.counterparty_prev_commitment_point.write(writer)?;
7044                 self.context.counterparty_node_id.write(writer)?;
7045
7046                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7047
7048                 self.context.commitment_secrets.write(writer)?;
7049
7050                 self.context.channel_update_status.write(writer)?;
7051
7052                 #[cfg(any(test, fuzzing))]
7053                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7054                 #[cfg(any(test, fuzzing))]
7055                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7056                         htlc.write(writer)?;
7057                 }
7058
7059                 // If the channel type is something other than only-static-remote-key, then we need to have
7060                 // older clients fail to deserialize this channel at all. If the type is
7061                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7062                 // out at all.
7063                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7064                         Some(&self.context.channel_type) } else { None };
7065
7066                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7067                 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7068                 // a different percentage of the channel value then 10%, which older versions of LDK used
7069                 // to set it to before the percentage was made configurable.
7070                 let serialized_holder_selected_reserve =
7071                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7072                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7073
7074                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7075                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7076                 let serialized_holder_htlc_max_in_flight =
7077                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7078                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7079
7080                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7081                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7082
7083                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7084                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7085                 // we write the high bytes as an option here.
7086                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7087
7088                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7089
7090                 write_tlv_fields!(writer, {
7091                         (0, self.context.announcement_sigs, option),
7092                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7093                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
7094                         // them twice, once with their original default values above, and once as an option
7095                         // here. On the read side, old versions will simply ignore the odd-type entries here,
7096                         // and new versions map the default values to None and allow the TLV entries here to
7097                         // override that.
7098                         (1, self.context.minimum_depth, option),
7099                         (2, chan_type, option),
7100                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7101                         (4, serialized_holder_selected_reserve, option),
7102                         (5, self.context.config, required),
7103                         (6, serialized_holder_htlc_max_in_flight, option),
7104                         (7, self.context.shutdown_scriptpubkey, option),
7105                         (8, self.context.blocked_monitor_updates, optional_vec),
7106                         (9, self.context.target_closing_feerate_sats_per_kw, option),
7107                         (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7108                         (13, self.context.channel_creation_height, required),
7109                         (15, preimages, required_vec),
7110                         (17, self.context.announcement_sigs_state, required),
7111                         (19, self.context.latest_inbound_scid_alias, option),
7112                         (21, self.context.outbound_scid_alias, required),
7113                         (23, channel_ready_event_emitted, option),
7114                         (25, user_id_high_opt, option),
7115                         (27, self.context.channel_keys_id, required),
7116                         (28, holder_max_accepted_htlcs, option),
7117                         (29, self.context.temporary_channel_id, option),
7118                         (31, channel_pending_event_emitted, option),
7119                         (35, pending_outbound_skimmed_fees, optional_vec),
7120                         (37, holding_cell_skimmed_fees, optional_vec),
7121                         (38, self.context.is_batch_funding, option),
7122                 });
7123
7124                 Ok(())
7125         }
7126 }
7127
7128 const MAX_ALLOC_SIZE: usize = 64*1024;
7129 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7130                 where
7131                         ES::Target: EntropySource,
7132                         SP::Target: SignerProvider
7133 {
7134         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7135                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7136                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7137
7138                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7139                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7140                 // the low bytes now and the high bytes later.
7141                 let user_id_low: u64 = Readable::read(reader)?;
7142
7143                 let mut config = Some(LegacyChannelConfig::default());
7144                 if ver == 1 {
7145                         // Read the old serialization of the ChannelConfig from version 0.0.98.
7146                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7147                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7148                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7149                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7150                 } else {
7151                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7152                         let mut _val: u64 = Readable::read(reader)?;
7153                 }
7154
7155                 let channel_id = Readable::read(reader)?;
7156                 let channel_state = Readable::read(reader)?;
7157                 let channel_value_satoshis = Readable::read(reader)?;
7158
7159                 let latest_monitor_update_id = Readable::read(reader)?;
7160
7161                 let mut keys_data = None;
7162                 if ver <= 2 {
7163                         // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7164                         // the `channel_keys_id` TLV is present below.
7165                         let keys_len: u32 = Readable::read(reader)?;
7166                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7167                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
7168                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7169                                 let mut data = [0; 1024];
7170                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7171                                 reader.read_exact(read_slice)?;
7172                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7173                         }
7174                 }
7175
7176                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7177                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7178                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7179                         Err(_) => None,
7180                 };
7181                 let destination_script = Readable::read(reader)?;
7182
7183                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7184                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7185                 let value_to_self_msat = Readable::read(reader)?;
7186
7187                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7188
7189                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7190                 for _ in 0..pending_inbound_htlc_count {
7191                         pending_inbound_htlcs.push(InboundHTLCOutput {
7192                                 htlc_id: Readable::read(reader)?,
7193                                 amount_msat: Readable::read(reader)?,
7194                                 cltv_expiry: Readable::read(reader)?,
7195                                 payment_hash: Readable::read(reader)?,
7196                                 state: match <u8 as Readable>::read(reader)? {
7197                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7198                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7199                                         3 => InboundHTLCState::Committed,
7200                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7201                                         _ => return Err(DecodeError::InvalidValue),
7202                                 },
7203                         });
7204                 }
7205
7206                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7207                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7208                 for _ in 0..pending_outbound_htlc_count {
7209                         pending_outbound_htlcs.push(OutboundHTLCOutput {
7210                                 htlc_id: Readable::read(reader)?,
7211                                 amount_msat: Readable::read(reader)?,
7212                                 cltv_expiry: Readable::read(reader)?,
7213                                 payment_hash: Readable::read(reader)?,
7214                                 source: Readable::read(reader)?,
7215                                 state: match <u8 as Readable>::read(reader)? {
7216                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7217                                         1 => OutboundHTLCState::Committed,
7218                                         2 => {
7219                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7220                                                 OutboundHTLCState::RemoteRemoved(option.into())
7221                                         },
7222                                         3 => {
7223                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7224                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7225                                         },
7226                                         4 => {
7227                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7228                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7229                                         },
7230                                         _ => return Err(DecodeError::InvalidValue),
7231                                 },
7232                                 skimmed_fee_msat: None,
7233                         });
7234                 }
7235
7236                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7237                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7238                 for _ in 0..holding_cell_htlc_update_count {
7239                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7240                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
7241                                         amount_msat: Readable::read(reader)?,
7242                                         cltv_expiry: Readable::read(reader)?,
7243                                         payment_hash: Readable::read(reader)?,
7244                                         source: Readable::read(reader)?,
7245                                         onion_routing_packet: Readable::read(reader)?,
7246                                         skimmed_fee_msat: None,
7247                                 },
7248                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7249                                         payment_preimage: Readable::read(reader)?,
7250                                         htlc_id: Readable::read(reader)?,
7251                                 },
7252                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
7253                                         htlc_id: Readable::read(reader)?,
7254                                         err_packet: Readable::read(reader)?,
7255                                 },
7256                                 _ => return Err(DecodeError::InvalidValue),
7257                         });
7258                 }
7259
7260                 let resend_order = match <u8 as Readable>::read(reader)? {
7261                         0 => RAACommitmentOrder::CommitmentFirst,
7262                         1 => RAACommitmentOrder::RevokeAndACKFirst,
7263                         _ => return Err(DecodeError::InvalidValue),
7264                 };
7265
7266                 let monitor_pending_channel_ready = Readable::read(reader)?;
7267                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7268                 let monitor_pending_commitment_signed = Readable::read(reader)?;
7269
7270                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7271                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7272                 for _ in 0..monitor_pending_forwards_count {
7273                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7274                 }
7275
7276                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7277                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7278                 for _ in 0..monitor_pending_failures_count {
7279                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7280                 }
7281
7282                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7283
7284                 let holding_cell_update_fee = Readable::read(reader)?;
7285
7286                 let next_holder_htlc_id = Readable::read(reader)?;
7287                 let next_counterparty_htlc_id = Readable::read(reader)?;
7288                 let update_time_counter = Readable::read(reader)?;
7289                 let feerate_per_kw = Readable::read(reader)?;
7290
7291                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7292                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7294                 // consider the stale state on reload.
7295                 match <u8 as Readable>::read(reader)? {
7296                         0 => {},
7297                         1 => {
7298                                 let _: u32 = Readable::read(reader)?;
7299                                 let _: u64 = Readable::read(reader)?;
7300                                 let _: Signature = Readable::read(reader)?;
7301                         },
7302                         _ => return Err(DecodeError::InvalidValue),
7303                 }
7304
7305                 let funding_tx_confirmed_in = Readable::read(reader)?;
7306                 let funding_tx_confirmation_height = Readable::read(reader)?;
7307                 let short_channel_id = Readable::read(reader)?;
7308
7309                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7310                 let holder_dust_limit_satoshis = Readable::read(reader)?;
7311                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7312                 let mut counterparty_selected_channel_reserve_satoshis = None;
7313                 if ver == 1 {
7314                         // Read the old serialization from version 0.0.98.
7315                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7316                 } else {
7317                         // Read the 8 bytes of backwards-compatibility data.
7318                         let _dummy: u64 = Readable::read(reader)?;
7319                 }
7320                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7321                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7322                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7323
7324                 let mut minimum_depth = None;
7325                 if ver == 1 {
7326                         // Read the old serialization from version 0.0.98.
7327                         minimum_depth = Some(Readable::read(reader)?);
7328                 } else {
7329                         // Read the 4 bytes of backwards-compatibility data.
7330                         let _dummy: u32 = Readable::read(reader)?;
7331                 }
7332
7333                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7334                         0 => None,
7335                         1 => Some(CounterpartyForwardingInfo {
7336                                 fee_base_msat: Readable::read(reader)?,
7337                                 fee_proportional_millionths: Readable::read(reader)?,
7338                                 cltv_expiry_delta: Readable::read(reader)?,
7339                         }),
7340                         _ => return Err(DecodeError::InvalidValue),
7341                 };
7342
7343                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7344                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7345
7346                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7347
7348                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7349                 let counterparty_node_id = Readable::read(reader)?;
7350
7351                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7352                 let commitment_secrets = Readable::read(reader)?;
7353
7354                 let channel_update_status = Readable::read(reader)?;
7355
7356                 #[cfg(any(test, fuzzing))]
7357                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7358                 #[cfg(any(test, fuzzing))]
7359                 {
7360                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7361                         for _ in 0..htlc_fulfills_len {
7362                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7363                         }
7364                 }
7365
7366                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7367                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7368                                 FeeUpdateState::Outbound
7369                         } else {
7370                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7371                         }))
7372                 } else {
7373                         None
7374                 };
7375
7376                 let mut announcement_sigs = None;
7377                 let mut target_closing_feerate_sats_per_kw = None;
7378                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7379                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7380                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7381                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7382                 // only, so we default to that if none was written.
7383                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7384                 let mut channel_creation_height = Some(serialized_height);
7385                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7386
7387                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7388                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7389                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7390                 let mut latest_inbound_scid_alias = None;
7391                 let mut outbound_scid_alias = None;
7392                 let mut channel_pending_event_emitted = None;
7393                 let mut channel_ready_event_emitted = None;
7394
7395                 let mut user_id_high_opt: Option<u64> = None;
7396                 let mut channel_keys_id: Option<[u8; 32]> = None;
7397                 let mut temporary_channel_id: Option<ChannelId> = None;
7398                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7399
7400                 let mut blocked_monitor_updates = Some(Vec::new());
7401
7402                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7403                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7404
7405                 let mut is_batch_funding: Option<()> = None;
7406
7407                 read_tlv_fields!(reader, {
7408                         (0, announcement_sigs, option),
7409                         (1, minimum_depth, option),
7410                         (2, channel_type, option),
7411                         (3, counterparty_selected_channel_reserve_satoshis, option),
7412                         (4, holder_selected_channel_reserve_satoshis, option),
7413                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7414                         (6, holder_max_htlc_value_in_flight_msat, option),
7415                         (7, shutdown_scriptpubkey, option),
7416                         (8, blocked_monitor_updates, optional_vec),
7417                         (9, target_closing_feerate_sats_per_kw, option),
7418                         (11, monitor_pending_finalized_fulfills, optional_vec),
7419                         (13, channel_creation_height, option),
7420                         (15, preimages_opt, optional_vec),
7421                         (17, announcement_sigs_state, option),
7422                         (19, latest_inbound_scid_alias, option),
7423                         (21, outbound_scid_alias, option),
7424                         (23, channel_ready_event_emitted, option),
7425                         (25, user_id_high_opt, option),
7426                         (27, channel_keys_id, option),
7427                         (28, holder_max_accepted_htlcs, option),
7428                         (29, temporary_channel_id, option),
7429                         (31, channel_pending_event_emitted, option),
7430                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
7431                         (37, holding_cell_skimmed_fees_opt, optional_vec),
7432                         (38, is_batch_funding, option),
7433                 });
7434
7435                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7436                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7437                         // If we've gotten to the funding stage of the channel, populate the signer with its
7438                         // required channel parameters.
7439                         let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7440                         if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7441                                 holder_signer.provide_channel_parameters(&channel_parameters);
7442                         }
7443                         (channel_keys_id, holder_signer)
7444                 } else {
7445                         // `keys_data` can be `None` if we had corrupted data.
7446                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7447                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7448                         (holder_signer.channel_keys_id(), holder_signer)
7449                 };
7450
7451                 if let Some(preimages) = preimages_opt {
7452                         let mut iter = preimages.into_iter();
7453                         for htlc in pending_outbound_htlcs.iter_mut() {
7454                                 match &htlc.state {
7455                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7456                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7457                                         }
7458                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7459                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7460                                         }
7461                                         _ => {}
7462                                 }
7463                         }
7464                         // We expect all preimages to be consumed above
7465                         if iter.next().is_some() {
7466                                 return Err(DecodeError::InvalidValue);
7467                         }
7468                 }
7469
7470                 let chan_features = channel_type.as_ref().unwrap();
7471                 if !chan_features.is_subset(our_supported_features) {
7472                         // If the channel was written by a new version and negotiated with features we don't
7473                         // understand yet, refuse to read it.
7474                         return Err(DecodeError::UnknownRequiredFeature);
7475                 }
7476
7477                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7478                 // To account for that, we're proactively setting/overriding the field here.
7479                 channel_parameters.channel_type_features = chan_features.clone();
7480
7481                 let mut secp_ctx = Secp256k1::new();
7482                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7483
7484                 // `user_id` used to be a single u64 value. In order to remain backwards
7485                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7486                 // separate u64 values.
7487                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7488
7489                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7490
7491                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7492                         let mut iter = skimmed_fees.into_iter();
7493                         for htlc in pending_outbound_htlcs.iter_mut() {
7494                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7495                         }
7496                         // We expect all skimmed fees to be consumed above
7497                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7498                 }
7499                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7500                         let mut iter = skimmed_fees.into_iter();
7501                         for htlc in holding_cell_htlc_updates.iter_mut() {
7502                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7503                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7504                                 }
7505                         }
7506                         // We expect all skimmed fees to be consumed above
7507                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7508                 }
7509
7510                 Ok(Channel {
7511                         context: ChannelContext {
7512                                 user_id,
7513
7514                                 config: config.unwrap(),
7515
7516                                 prev_config: None,
7517
7518                                 // Note that we don't care about serializing handshake limits as we only ever serialize
7519                                 // channel data after the handshake has completed.
7520                                 inbound_handshake_limits_override: None,
7521
7522                                 channel_id,
7523                                 temporary_channel_id,
7524                                 channel_state,
7525                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
7526                                 secp_ctx,
7527                                 channel_value_satoshis,
7528
7529                                 latest_monitor_update_id,
7530
7531                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7532                                 shutdown_scriptpubkey,
7533                                 destination_script,
7534
7535                                 cur_holder_commitment_transaction_number,
7536                                 cur_counterparty_commitment_transaction_number,
7537                                 value_to_self_msat,
7538
7539                                 holder_max_accepted_htlcs,
7540                                 pending_inbound_htlcs,
7541                                 pending_outbound_htlcs,
7542                                 holding_cell_htlc_updates,
7543
7544                                 resend_order,
7545
7546                                 monitor_pending_channel_ready,
7547                                 monitor_pending_revoke_and_ack,
7548                                 monitor_pending_commitment_signed,
7549                                 monitor_pending_forwards,
7550                                 monitor_pending_failures,
7551                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7552
7553                                 pending_update_fee,
7554                                 holding_cell_update_fee,
7555                                 next_holder_htlc_id,
7556                                 next_counterparty_htlc_id,
7557                                 update_time_counter,
7558                                 feerate_per_kw,
7559
7560                                 #[cfg(debug_assertions)]
7561                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7562                                 #[cfg(debug_assertions)]
7563                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7564
7565                                 last_sent_closing_fee: None,
7566                                 pending_counterparty_closing_signed: None,
7567                                 closing_fee_limits: None,
7568                                 target_closing_feerate_sats_per_kw,
7569
7570                                 funding_tx_confirmed_in,
7571                                 funding_tx_confirmation_height,
7572                                 short_channel_id,
7573                                 channel_creation_height: channel_creation_height.unwrap(),
7574
7575                                 counterparty_dust_limit_satoshis,
7576                                 holder_dust_limit_satoshis,
7577                                 counterparty_max_htlc_value_in_flight_msat,
7578                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7579                                 counterparty_selected_channel_reserve_satoshis,
7580                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7581                                 counterparty_htlc_minimum_msat,
7582                                 holder_htlc_minimum_msat,
7583                                 counterparty_max_accepted_htlcs,
7584                                 minimum_depth,
7585
7586                                 counterparty_forwarding_info,
7587
7588                                 channel_transaction_parameters: channel_parameters,
7589                                 funding_transaction,
7590                                 is_batch_funding,
7591
7592                                 counterparty_cur_commitment_point,
7593                                 counterparty_prev_commitment_point,
7594                                 counterparty_node_id,
7595
7596                                 counterparty_shutdown_scriptpubkey,
7597
7598                                 commitment_secrets,
7599
7600                                 channel_update_status,
7601                                 closing_signed_in_flight: false,
7602
7603                                 announcement_sigs,
7604
7605                                 #[cfg(any(test, fuzzing))]
7606                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7607                                 #[cfg(any(test, fuzzing))]
7608                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7609
7610                                 workaround_lnd_bug_4006: None,
7611                                 sent_message_awaiting_response: None,
7612
7613                                 latest_inbound_scid_alias,
7614                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7615                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7616
7617                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7618                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7619
7620                                 #[cfg(any(test, fuzzing))]
7621                                 historical_inbound_htlc_fulfills,
7622
7623                                 channel_type: channel_type.unwrap(),
7624                                 channel_keys_id,
7625
7626                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7627                         }
7628                 })
7629         }
7630 }
7631
7632 #[cfg(test)]
7633 mod tests {
7634         use std::cmp;
7635         use bitcoin::blockdata::script::{Script, Builder};
7636         use bitcoin::blockdata::transaction::{Transaction, TxOut};
7637         use bitcoin::blockdata::constants::genesis_block;
7638         use bitcoin::blockdata::opcodes;
7639         use bitcoin::network::constants::Network;
7640         use hex;
7641         use crate::ln::PaymentHash;
7642         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7643         use crate::ln::channel::InitFeatures;
7644         use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7645         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7646         use crate::ln::features::ChannelTypeFeatures;
7647         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7648         use crate::ln::script::ShutdownScript;
7649         use crate::ln::chan_utils;
7650         use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7651         use crate::chain::BestBlock;
7652         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7653         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7654         use crate::chain::transaction::OutPoint;
7655         use crate::routing::router::Path;
7656         use crate::util::config::UserConfig;
7657         use crate::util::errors::APIError;
7658         use crate::util::test_utils;
7659         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7660         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7661         use bitcoin::secp256k1::ffi::Signature as FFISignature;
7662         use bitcoin::secp256k1::{SecretKey,PublicKey};
7663         use bitcoin::hashes::sha256::Hash as Sha256;
7664         use bitcoin::hashes::Hash;
7665         use bitcoin::hash_types::WPubkeyHash;
7666         use bitcoin::PackedLockTime;
7667         use bitcoin::util::address::WitnessVersion;
7668         use crate::prelude::*;
7669
	/// Test-only fee estimator that returns the same fixed feerate for every
	/// confirmation target (see its `FeeEstimator` impl below).
	struct TestFeeEstimator {
		// Feerate, in sat per 1000 weight, handed back unconditionally.
		fee_est: u32
	}
7673         impl FeeEstimator for TestFeeEstimator {
7674                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7675                         self.fee_est
7676                 }
7677         }
7678
7679         #[test]
7680         fn test_max_funding_satoshis_no_wumbo() {
7681                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7682                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7683                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7684         }
7685
7686         #[test]
7687         fn test_no_fee_check_overflow() {
7688                 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7689                 // arithmetic, causing a panic with debug assertions enabled.
7690                 let fee_est = TestFeeEstimator { fee_est: 42 };
7691                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7692                 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
7693                         &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
7694                         u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
7695         }
7696
	/// Test-only provider wrapping a single `InMemorySigner`; serves as both the
	/// entropy source and the signer provider in the tests below.
	struct Keys {
		signer: InMemorySigner,
	}
7700
7701         impl EntropySource for Keys {
7702                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7703         }
7704
7705         impl SignerProvider for Keys {
7706                 type Signer = InMemorySigner;
7707
7708                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7709                         self.signer.channel_keys_id()
7710                 }
7711
7712                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7713                         self.signer.clone()
7714                 }
7715
7716                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
7717
7718                 fn get_destination_script(&self) -> Result<Script, ()> {
7719                         let secp_ctx = Secp256k1::signing_only();
7720                         let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7721                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7722                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
7723                 }
7724
7725                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7726                         let secp_ctx = Secp256k1::signing_only();
7727                         let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7728                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7729                 }
7730         }
7731
7732         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7733         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7734                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
7735         }
7736
7737         #[test]
7738         fn upfront_shutdown_script_incompatibility() {
7739                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7740                 let non_v0_segwit_shutdown_script =
7741                         ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7742
7743                 let seed = [42; 32];
7744                 let network = Network::Testnet;
7745                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7746                 keys_provider.expect(OnGetShutdownScriptpubkey {
7747                         returns: non_v0_segwit_shutdown_script.clone(),
7748                 });
7749
7750                 let secp_ctx = Secp256k1::new();
7751                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7752                 let config = UserConfig::default();
7753                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7754                         Err(APIError::IncompatibleShutdownScript { script }) => {
7755                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7756                         },
7757                         Err(e) => panic!("Unexpected error: {:?}", e),
7758                         Ok(_) => panic!("Expected error"),
7759                 }
7760         }
7761
7762         // Check that, during channel creation, we use the same feerate in the open channel message
7763         // as we do in the Channel object creation itself.
7764         #[test]
7765         fn test_open_channel_msg_fee() {
7766                 let original_fee = 253;
7767                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7768                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7769                 let secp_ctx = Secp256k1::new();
7770                 let seed = [42; 32];
7771                 let network = Network::Testnet;
7772                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7773
7774                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7775                 let config = UserConfig::default();
7776                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7777
7778                 // Now change the fee so we can check that the fee in the open_channel message is the
7779                 // same as the old fee.
7780                 fee_est.fee_est = 500;
7781                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7782                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7783         }
7784
	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		// Force A's own dust limit well above B's 546 sats, so the same HTLC amount
		// can be dust from A's perspective yet non-dust from B's.
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});

		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		// All HTLCs dust => fee equals the 0-HTLC commitment fee.
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		// Two pending HTLCs plus the candidate = 3 non-dust HTLCs from B's side.
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}
7869
	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Oracle values: expected commitment fees with zero and with one non-dust HTLC.
		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		// One satoshi above the timeout-tx dust threshold at the 253 sat/kw feerate.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		// One satoshi below the success-tx dust threshold.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// Repeat both checks from the counterparty's perspective, now keyed off
		// the counterparty's dust limit.
		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
7916
	#[test]
	fn channel_reestablish_no_updates() {
		// Open a channel, disconnect both peers with no updates in flight, and check
		// that each side's channel_reestablish message carries sane commitment numbers.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = best_block.block_hash();
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		// No commitments have been revoked yet, so the last-secret field is all zeros.
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
7972
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		// Check that `max_inbound_htlc_value_in_flight_percent_of_channel` is translated into
		// `holder_max_htlc_value_in_flight_msat` for both outbound and inbound channels, and
		// that out-of-range percentages are clamped to [1, 100].
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		// Configs at the in-range boundaries (2%, 99%) and out-of-range values (0%, 101%).
		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		// Reuse chan_1's open_channel message for all the inbound-side cases below.
		let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound - 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
8045
8046         #[test]
8047         fn test_configured_holder_selected_channel_reserve_satoshis() {
8048
8049                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8050                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8051                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8052
8053                 // Test with valid but unreasonably high channel reserves
8054                 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8055                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8056                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8057
8058                 // Test with calculated channel reserve less than lower bound
8059                 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8060                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8061
8062                 // Test with invalid channel reserves since sum of both is greater than or equal
8063                 // to channel value
8064                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8065                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8066         }
8067
	// Helper: open (or attempt to open) a channel where the outbound and inbound nodes each
	// request a channel reserve equal to the given fraction of `channel_value_satoshis`.
	// If the two fractions sum to < 1.0 the handshake must succeed with each side's reserve
	// equal to max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, requested fraction); otherwise the
	// inbound side must reject the open_channel message.
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());


		// Outbound node requests its counterparty hold the configured fraction in reserve.
		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();

		// The selected reserve is floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS.
		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			// Combined reserves leave spendable balance: the inbound side must accept,
			// and both sides' reserve selections must round-trip through the handshake.
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel Negotiations failed
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
8103
8104         #[test]
8105         fn channel_update() {
8106                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8107                 let logger = test_utils::TestLogger::new();
8108                 let secp_ctx = Secp256k1::new();
8109                 let seed = [42; 32];
8110                 let network = Network::Testnet;
8111                 let best_block = BestBlock::from_network(network);
8112                 let chain_hash = genesis_block(network).header.block_hash();
8113                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8114
8115                 // Create Node A's channel pointing to Node B's pubkey
8116                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8117                 let config = UserConfig::default();
8118                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8119
8120                 // Create Node B's channel by receiving Node A's open_channel message
8121                 // Make sure A's dust limit is as we expect.
8122                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8123                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8124                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8125
8126                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8127                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8128                 accept_channel_msg.dust_limit_satoshis = 546;
8129                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8130                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8131
8132                 // Node A --> Node B: funding created
8133                 let output_script = node_a_chan.context.get_funding_redeemscript();
8134                 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8135                         value: 10000000, script_pubkey: output_script.clone(),
8136                 }]};
8137                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8138                 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8139                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8140
8141                 // Node B --> Node A: funding signed
8142                 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8143
8144                 // Make sure that receiving a channel update will update the Channel as expected.
8145                 let update = ChannelUpdate {
8146                         contents: UnsignedChannelUpdate {
8147                                 chain_hash,
8148                                 short_channel_id: 0,
8149                                 timestamp: 0,
8150                                 flags: 0,
8151                                 cltv_expiry_delta: 100,
8152                                 htlc_minimum_msat: 5,
8153                                 htlc_maximum_msat: MAX_VALUE_MSAT,
8154                                 fee_base_msat: 110,
8155                                 fee_proportional_millionths: 11,
8156                                 excess_data: Vec::new(),
8157                         },
8158                         signature: Signature::from(unsafe { FFISignature::new() })
8159                 };
8160                 assert!(node_a_chan.channel_update(&update).unwrap());
8161
8162                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8163                 // change our official htlc_minimum_msat.
8164                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8165                 match node_a_chan.context.counterparty_forwarding_info() {
8166                         Some(info) => {
8167                                 assert_eq!(info.cltv_expiry_delta, 100);
8168                                 assert_eq!(info.fee_base_msat, 110);
8169                                 assert_eq!(info.fee_proportional_millionths, 11);
8170                         },
8171                         None => panic!("expected counterparty forwarding info to be Some")
8172                 }
8173
8174                 assert!(!node_a_chan.channel_update(&update).unwrap());
8175         }
8176
8177         #[cfg(feature = "_test_vectors")]
8178         #[test]
8179         fn outbound_commitment_test() {
8180                 use bitcoin::util::sighash;
8181                 use bitcoin::consensus::encode::serialize;
8182                 use bitcoin::blockdata::transaction::EcdsaSighashType;
8183                 use bitcoin::hashes::hex::FromHex;
8184                 use bitcoin::hash_types::Txid;
8185                 use bitcoin::secp256k1::Message;
8186                 use crate::sign::EcdsaChannelSigner;
8187                 use crate::ln::PaymentPreimage;
8188                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8189                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8190                 use crate::util::logger::Logger;
8191                 use crate::sync::Arc;
8192
8193                 // Test vectors from BOLT 3 Appendices C and F (anchors):
8194                 let feeest = TestFeeEstimator{fee_est: 15000};
8195                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8196                 let secp_ctx = Secp256k1::new();
8197
8198                 let mut signer = InMemorySigner::new(
8199                         &secp_ctx,
8200                         SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8201                         SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8202                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8203                         SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8204                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8205
8206                         // These aren't set in the test vectors:
8207                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8208                         10_000_000,
8209                         [0; 32],
8210                         [0; 32],
8211                 );
8212
8213                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8214                                 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8215                 let keys_provider = Keys { signer: signer.clone() };
8216
8217                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8218                 let mut config = UserConfig::default();
8219                 config.channel_handshake_config.announced_channel = false;
8220                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8221                 chan.context.holder_dust_limit_satoshis = 546;
8222                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8223
8224                 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8225
8226                 let counterparty_pubkeys = ChannelPublicKeys {
8227                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8228                         revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8229                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8230                         delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8231                         htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8232                 };
8233                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8234                         CounterpartyChannelTransactionParameters {
8235                                 pubkeys: counterparty_pubkeys.clone(),
8236                                 selected_contest_delay: 144
8237                         });
8238                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8239                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8240
8241                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8242                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8243
8244                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8245                            hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8246
8247                 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8248                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8249
8250                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8251                 // derived from a commitment_seed, so instead we copy it here and call
8252                 // build_commitment_transaction.
8253                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8254                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8255                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8256                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8257                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8258
8259                 macro_rules! test_commitment {
8260                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8261                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8262                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8263                         };
8264                 }
8265
8266                 macro_rules! test_commitment_with_anchors {
8267                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8268                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8269                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8270                         };
8271                 }
8272
8273                 macro_rules! test_commitment_common {
8274                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8275                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8276                         } ) => { {
8277                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8278                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8279
8280                                         let htlcs = commitment_stats.htlcs_included.drain(..)
8281                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8282                                                 .collect();
8283                                         (commitment_stats.tx, htlcs)
8284                                 };
8285                                 let trusted_tx = commitment_tx.trust();
8286                                 let unsigned_tx = trusted_tx.built_transaction();
8287                                 let redeemscript = chan.context.get_funding_redeemscript();
8288                                 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8289                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8290                                 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8291                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8292
8293                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8294                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8295                                 let mut counterparty_htlc_sigs = Vec::new();
8296                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8297                                 $({
8298                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8299                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8300                                         counterparty_htlc_sigs.push(remote_signature);
8301                                 })*
8302                                 assert_eq!(htlcs.len(), per_htlc.len());
8303
8304                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
8305                                         commitment_tx.clone(),
8306                                         counterparty_signature,
8307                                         counterparty_htlc_sigs,
8308                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8309                                         chan.context.counterparty_funding_pubkey()
8310                                 );
8311                                 let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
8312                                 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8313
8314                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
8315                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8316                                 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8317
8318                                 // ((htlc, counterparty_sig), (index, holder_sig))
8319                                 let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
8320
8321                                 $({
8322                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
8323                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8324
8325                                         let ref htlc = htlcs[$htlc_idx];
8326                                         let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8327                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8328                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8329                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8330                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8331                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8332                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8333
8334                                         let mut preimage: Option<PaymentPreimage> = None;
8335                                         if !htlc.offered {
8336                                                 for i in 0..5 {
8337                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8338                                                         if out == htlc.payment_hash {
8339                                                                 preimage = Some(PaymentPreimage([i; 32]));
8340                                                         }
8341                                                 }
8342
8343                                                 assert!(preimage.is_some());
8344                                         }
8345
8346                                         let htlc_sig = htlc_sig_iter.next().unwrap();
8347                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8348                                         assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8349
8350                                         let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8351                                         assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
8352                                         let index = (htlc_sig.1).0;
8353                                         let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable();
8354                                         let trusted_tx = holder_commitment_tx.trust();
8355                                         log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))));
8356                                         assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
8357                                                         hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8358                                 })*
8359                                 assert!(htlc_sig_iter.next().is_none());
8360                         } }
8361                 }
8362
8363                 // anchors: simple commitment tx with no HTLCs and single anchor
8364                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8365                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8366                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8367
8368                 // simple commitment tx with no HTLCs
8369                 chan.context.value_to_self_msat = 7000000000;
8370
8371                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8372                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8373                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8374
8375                 // anchors: simple commitment tx with no HTLCs
8376                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8377                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8378                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8379
8380                 chan.context.pending_inbound_htlcs.push({
8381                         let mut out = InboundHTLCOutput{
8382                                 htlc_id: 0,
8383                                 amount_msat: 1000000,
8384                                 cltv_expiry: 500,
8385                                 payment_hash: PaymentHash([0; 32]),
8386                                 state: InboundHTLCState::Committed,
8387                         };
8388                         out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8389                         out
8390                 });
8391                 chan.context.pending_inbound_htlcs.push({
8392                         let mut out = InboundHTLCOutput{
8393                                 htlc_id: 1,
8394                                 amount_msat: 2000000,
8395                                 cltv_expiry: 501,
8396                                 payment_hash: PaymentHash([0; 32]),
8397                                 state: InboundHTLCState::Committed,
8398                         };
8399                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8400                         out
8401                 });
8402                 chan.context.pending_outbound_htlcs.push({
8403                         let mut out = OutboundHTLCOutput{
8404                                 htlc_id: 2,
8405                                 amount_msat: 2000000,
8406                                 cltv_expiry: 502,
8407                                 payment_hash: PaymentHash([0; 32]),
8408                                 state: OutboundHTLCState::Committed,
8409                                 source: HTLCSource::dummy(),
8410                                 skimmed_fee_msat: None,
8411                         };
8412                         out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8413                         out
8414                 });
8415                 chan.context.pending_outbound_htlcs.push({
8416                         let mut out = OutboundHTLCOutput{
8417                                 htlc_id: 3,
8418                                 amount_msat: 3000000,
8419                                 cltv_expiry: 503,
8420                                 payment_hash: PaymentHash([0; 32]),
8421                                 state: OutboundHTLCState::Committed,
8422                                 source: HTLCSource::dummy(),
8423                                 skimmed_fee_msat: None,
8424                         };
8425                         out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8426                         out
8427                 });
8428                 chan.context.pending_inbound_htlcs.push({
8429                         let mut out = InboundHTLCOutput{
8430                                 htlc_id: 4,
8431                                 amount_msat: 4000000,
8432                                 cltv_expiry: 504,
8433                                 payment_hash: PaymentHash([0; 32]),
8434                                 state: InboundHTLCState::Committed,
8435                         };
8436                         out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8437                         out
8438                 });
8439
8440                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8441                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8442                 chan.context.feerate_per_kw = 0;
8443
8444                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8445                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8446                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8447
8448                                   { 0,
8449                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8450                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8451                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8452
8453                                   { 1,
8454                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8455                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8456                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8457
8458                                   { 2,
8459                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8460                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8461                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8462
8463                                   { 3,
8464                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8465                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8466                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8467
8468                                   { 4,
8469                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8470                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8471                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8472                 } );
8473
8474                 // commitment tx with seven outputs untrimmed (maximum feerate)
8475                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8476                 chan.context.feerate_per_kw = 647;
8477
8478                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8479                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8480                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8481
8482                                   { 0,
8483                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8484                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8485                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8486
8487                                   { 1,
8488                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8489                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8490                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8491
8492                                   { 2,
8493                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8494                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8495                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8496
8497                                   { 3,
8498                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8499                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8500                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8501
8502                                   { 4,
8503                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8504                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8505                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8506                 } );
8507
8508                 // commitment tx with six outputs untrimmed (minimum feerate)
8509                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8510                 chan.context.feerate_per_kw = 648;
8511
8512                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8513                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8514                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8515
8516                                   { 0,
8517                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8518                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8519                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8520
8521                                   { 1,
8522                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8523                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8524                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8525
8526                                   { 2,
8527                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8528                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8529                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8530
8531                                   { 3,
8532                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8533                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8534                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8535                 } );
8536
8537                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8538                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8539                 chan.context.feerate_per_kw = 645;
8540                 chan.context.holder_dust_limit_satoshis = 1001;
8541
8542                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8543                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8544                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8545
8546                                   { 0,
8547                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8548                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8549                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8550
8551                                   { 1,
8552                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8553                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8554                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8555
8556                                   { 2,
8557                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8558                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8559                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8560
8561                                   { 3,
8562                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8563                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8564                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8565                 } );
8566
8567                 // commitment tx with six outputs untrimmed (maximum feerate)
8568                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8569                 chan.context.feerate_per_kw = 2069;
8570                 chan.context.holder_dust_limit_satoshis = 546;
8571
8572                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8573                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8574                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8575
8576                                   { 0,
8577                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8578                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8579                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8580
8581                                   { 1,
8582                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8583                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8584                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8585
8586                                   { 2,
8587                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8588                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8589                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8590
8591                                   { 3,
8592                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8593                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8594                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8595                 } );
8596
8597                 // commitment tx with five outputs untrimmed (minimum feerate)
8598                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8599                 chan.context.feerate_per_kw = 2070;
8600
8601                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8602                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8603                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8604
8605                                   { 0,
8606                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8607                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8608                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8609
8610                                   { 1,
8611                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8612                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8613                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8614
8615                                   { 2,
8616                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8617                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8618                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8619                 } );
8620
8621                 // commitment tx with five outputs untrimmed (maximum feerate)
8622                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8623                 chan.context.feerate_per_kw = 2194;
8624
8625                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8626                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8627                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8628
8629                                   { 0,
8630                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8631                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8632                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8633
8634                                   { 1,
8635                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8636                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8637                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8638
8639                                   { 2,
8640                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8641                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8642                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8643                 } );
8644
8645                 // commitment tx with four outputs untrimmed (minimum feerate)
8646                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8647                 chan.context.feerate_per_kw = 2195;
8648
8649                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8650                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8651                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8652
8653                                   { 0,
8654                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8655                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8656                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8657
8658                                   { 1,
8659                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8660                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8661                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8662                 } );
8663
8664                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8665                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8666                 chan.context.feerate_per_kw = 2185;
8667                 chan.context.holder_dust_limit_satoshis = 2001;
8668                 let cached_channel_type = chan.context.channel_type;
8669                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8670
8671                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8672                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8673                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8674
8675                                   { 0,
8676                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8677                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8678                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8679
8680                                   { 1,
8681                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8682                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8683                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8684                 } );
8685
8686                 // commitment tx with four outputs untrimmed (maximum feerate)
8687                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8688                 chan.context.feerate_per_kw = 3702;
8689                 chan.context.holder_dust_limit_satoshis = 546;
8690                 chan.context.channel_type = cached_channel_type.clone();
8691
8692                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8693                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8694                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8695
8696                                   { 0,
8697                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8698                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8699                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8700
8701                                   { 1,
8702                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8703                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8704                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8705                 } );
8706
8707                 // commitment tx with three outputs untrimmed (minimum feerate)
8708                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8709                 chan.context.feerate_per_kw = 3703;
8710
8711                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8712                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8713                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8714
8715                                   { 0,
8716                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8717                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8718                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8719                 } );
8720
8721                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8722                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8723                 chan.context.feerate_per_kw = 3687;
8724                 chan.context.holder_dust_limit_satoshis = 3001;
8725                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8726
8727                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8728                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8729                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8730
8731                                   { 0,
8732                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8733                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8734                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8735                 } );
8736
8737                 // commitment tx with three outputs untrimmed (maximum feerate)
8738                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8739                 chan.context.feerate_per_kw = 4914;
8740                 chan.context.holder_dust_limit_satoshis = 546;
8741                 chan.context.channel_type = cached_channel_type.clone();
8742
8743                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8744                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8745                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8746
8747                                   { 0,
8748                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8749                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8750                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8751                 } );
8752
8753                 // commitment tx with two outputs untrimmed (minimum feerate)
8754                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8755                 chan.context.feerate_per_kw = 4915;
8756                 chan.context.holder_dust_limit_satoshis = 546;
8757
8758                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8759                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8760                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8761
8762                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8763                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8764                 chan.context.feerate_per_kw = 4894;
8765                 chan.context.holder_dust_limit_satoshis = 4001;
8766                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8767
8768                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8769                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8770                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8771
8772                 // commitment tx with two outputs untrimmed (maximum feerate)
8773                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8774                 chan.context.feerate_per_kw = 9651180;
8775                 chan.context.holder_dust_limit_satoshis = 546;
8776                 chan.context.channel_type = cached_channel_type.clone();
8777
8778                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8779                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8780                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8781
8782                 // commitment tx with one output untrimmed (minimum feerate)
8783                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8784                 chan.context.feerate_per_kw = 9651181;
8785
8786                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8787                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8788                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8789
8790                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8791                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8792                 chan.context.feerate_per_kw = 6216010;
8793                 chan.context.holder_dust_limit_satoshis = 4001;
8794                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8795
8796                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8797                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8798                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8799
8800                 // commitment tx with fee greater than funder amount
8801                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8802                 chan.context.feerate_per_kw = 9651936;
8803                 chan.context.holder_dust_limit_satoshis = 546;
8804                 chan.context.channel_type = cached_channel_type;
8805
8806                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8807                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8808                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8809
8810                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8811                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8812                 chan.context.feerate_per_kw = 253;
8813                 chan.context.pending_inbound_htlcs.clear();
8814                 chan.context.pending_inbound_htlcs.push({
8815                         let mut out = InboundHTLCOutput{
8816                                 htlc_id: 1,
8817                                 amount_msat: 2000000,
8818                                 cltv_expiry: 501,
8819                                 payment_hash: PaymentHash([0; 32]),
8820                                 state: InboundHTLCState::Committed,
8821                         };
8822                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8823                         out
8824                 });
8825                 chan.context.pending_outbound_htlcs.clear();
8826                 chan.context.pending_outbound_htlcs.push({
8827                         let mut out = OutboundHTLCOutput{
8828                                 htlc_id: 6,
8829                                 amount_msat: 5000001,
8830                                 cltv_expiry: 506,
8831                                 payment_hash: PaymentHash([0; 32]),
8832                                 state: OutboundHTLCState::Committed,
8833                                 source: HTLCSource::dummy(),
8834                                 skimmed_fee_msat: None,
8835                         };
8836                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8837                         out
8838                 });
8839                 chan.context.pending_outbound_htlcs.push({
8840                         let mut out = OutboundHTLCOutput{
8841                                 htlc_id: 5,
8842                                 amount_msat: 5000000,
8843                                 cltv_expiry: 505,
8844                                 payment_hash: PaymentHash([0; 32]),
8845                                 state: OutboundHTLCState::Committed,
8846                                 source: HTLCSource::dummy(),
8847                                 skimmed_fee_msat: None,
8848                         };
8849                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8850                         out
8851                 });
8852
8853                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8854                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8855                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8856
8857                                   { 0,
8858                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8859                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8860                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8861                                   { 1,
8862                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8863                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8864                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8865                                   { 2,
8866                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8867                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8868                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8869                 } );
8870
8871                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8872                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8873                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8874                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8875
8876                                   { 0,
8877                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8878                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8879                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8880                                   { 1,
8881                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8882                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8883                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8884                                   { 2,
8885                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8886                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8887                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8888                 } );
8889         }
8890
8891         #[test]
8892         fn test_per_commitment_secret_gen() {
8893                 // Test vectors from BOLT 3 Appendix D:
8894
8895                 let mut seed = [0; 32];
8896                 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8897                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8898                            hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
8899
8900                 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8901                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8902                            hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
8903
8904                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8905                            hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8906
8907                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8908                            hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
8909
8910                 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8911                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8912                            hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8913         }
8914
8915         #[test]
8916         fn test_key_derivation() {
8917                 // Test vectors from BOLT 3 Appendix E:
8918                 let secp_ctx = Secp256k1::new();
8919
8920                 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8921                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8922
8923                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8924                 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8925
8926                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8927                 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
8928
8929                 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8930                                 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
8931
8932                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8933                                 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
8934
8935                 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8936                                 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
8937
8938                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8939                                 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8940         }
8941
8942         #[test]
8943         fn test_zero_conf_channel_type_support() {
8944                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8945                 let secp_ctx = Secp256k1::new();
8946                 let seed = [42; 32];
8947                 let network = Network::Testnet;
8948                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8949                 let logger = test_utils::TestLogger::new();
8950
8951                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8952                 let config = UserConfig::default();
8953                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
8954                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8955
8956                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8957                 channel_type_features.set_zero_conf_required();
8958
8959                 let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8960                 open_channel_msg.channel_type = Some(channel_type_features);
8961                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8962                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
8963                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
8964                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
8965                 assert!(res.is_ok());
8966         }
8967
8968         #[test]
8969         fn test_supports_anchors_zero_htlc_tx_fee() {
8970                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
8971                 // resulting `channel_type`.
8972                 let secp_ctx = Secp256k1::new();
8973                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8974                 let network = Network::Testnet;
8975                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8976                 let logger = test_utils::TestLogger::new();
8977
8978                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8979                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8980
8981                 let mut config = UserConfig::default();
8982                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
8983
8984                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
8985                 // need to signal it.
8986                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
8987                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8988                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
8989                         &config, 0, 42
8990                 ).unwrap();
8991                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
8992
8993                 let mut expected_channel_type = ChannelTypeFeatures::empty();
8994                 expected_channel_type.set_static_remote_key_required();
8995                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
8996
8997                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
8998                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8999                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9000                 ).unwrap();
9001
9002                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9003                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9004                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9005                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9006                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9007                 ).unwrap();
9008
9009                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9010                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9011         }
9012
9013         #[test]
9014         fn test_rejects_implicit_simple_anchors() {
9015                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9016                 // each side's `InitFeatures`, it is rejected.
9017                 let secp_ctx = Secp256k1::new();
9018                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9019                 let network = Network::Testnet;
9020                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9021                 let logger = test_utils::TestLogger::new();
9022
9023                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9024                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9025
9026                 let config = UserConfig::default();
9027
9028                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9029                 let static_remote_key_required: u64 = 1 << 12;
9030                 let simple_anchors_required: u64 = 1 << 20;
9031                 let raw_init_features = static_remote_key_required | simple_anchors_required;
9032                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9033
9034                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9035                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9036                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9037                 ).unwrap();
9038
9039                 // Set `channel_type` to `None` to force the implicit feature negotiation.
9040                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9041                 open_channel_msg.channel_type = None;
9042
9043                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9044                 // `static_remote_key`, it will fail the channel.
9045                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9046                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9047                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9048                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9049                 );
9050                 assert!(channel_b.is_err());
9051         }
9052
9053         #[test]
9054         fn test_rejects_simple_anchors_channel_type() {
9055                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9056                 // it is rejected.
9057                 let secp_ctx = Secp256k1::new();
9058                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9059                 let network = Network::Testnet;
9060                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9061                 let logger = test_utils::TestLogger::new();
9062
9063                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9064                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9065
9066                 let config = UserConfig::default();
9067
9068                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9069                 let static_remote_key_required: u64 = 1 << 12;
9070                 let simple_anchors_required: u64 = 1 << 20;
9071                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9072                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9073                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9074                 assert!(!simple_anchors_init.requires_unknown_bits());
9075                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9076
9077                 // First, we'll try to open a channel between A and B where A requests a channel type for
9078                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9079                 // B as it's not supported by LDK.
9080                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9081                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9082                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9083                 ).unwrap();
9084
9085                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9086                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9087
9088                 let res = InboundV1Channel::<&TestKeysInterface>::new(
9089                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9090                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9091                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9092                 );
9093                 assert!(res.is_err());
9094
9095                 // Then, we'll try to open another channel where A requests a channel type for
9096                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9097                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9098                 // LDK.
9099                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9100                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9101                         10000000, 100000, 42, &config, 0, 42
9102                 ).unwrap();
9103
9104                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9105
9106                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9107                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9108                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9109                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9110                 ).unwrap();
9111
9112                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9113                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9114
9115                 let res = channel_a.accept_channel(
9116                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9117                 );
9118                 assert!(res.is_err());
9119         }
9120
9121         #[test]
9122         fn test_waiting_for_batch() {
9123                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9124                 let logger = test_utils::TestLogger::new();
9125                 let secp_ctx = Secp256k1::new();
9126                 let seed = [42; 32];
9127                 let network = Network::Testnet;
9128                 let best_block = BestBlock::from_network(network);
9129                 let chain_hash = genesis_block(network).header.block_hash();
9130                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9131
9132                 let mut config = UserConfig::default();
9133                 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9134                 // channel in a batch before all channels are ready.
9135                 config.channel_handshake_limits.trust_own_funding_0conf = true;
9136
9137                 // Create a channel from node a to node b that will be part of batch funding.
9138                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9139                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9140                         &feeest,
9141                         &&keys_provider,
9142                         &&keys_provider,
9143                         node_b_node_id,
9144                         &channelmanager::provided_init_features(&config),
9145                         10000000,
9146                         100000,
9147                         42,
9148                         &config,
9149                         0,
9150                         42,
9151                 ).unwrap();
9152
9153                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
9154                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9155                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9156                         &feeest,
9157                         &&keys_provider,
9158                         &&keys_provider,
9159                         node_b_node_id,
9160                         &channelmanager::provided_channel_type_features(&config),
9161                         &channelmanager::provided_init_features(&config),
9162                         &open_channel_msg,
9163                         7,
9164                         &config,
9165                         0,
9166                         &&logger,
9167                         true,  // Allow node b to send a 0conf channel_ready.
9168                 ).unwrap();
9169
9170                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9171                 node_a_chan.accept_channel(
9172                         &accept_channel_msg,
9173                         &config.channel_handshake_limits,
9174                         &channelmanager::provided_init_features(&config),
9175                 ).unwrap();
9176
9177                 // Fund the channel with a batch funding transaction.
9178                 let output_script = node_a_chan.context.get_funding_redeemscript();
9179                 let tx = Transaction {
9180                         version: 1,
9181                         lock_time: PackedLockTime::ZERO,
9182                         input: Vec::new(),
9183                         output: vec![
9184                                 TxOut {
9185                                         value: 10000000, script_pubkey: output_script.clone(),
9186                                 },
9187                                 TxOut {
9188                                         value: 10000000, script_pubkey: Builder::new().into_script(),
9189                                 },
9190                         ]};
9191                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9192                 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9193                         tx.clone(),
9194                         funding_outpoint,
9195                         true,
9196                         &&logger,
9197                 ).map_err(|_| ()).unwrap();
9198                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9199                         &funding_created_msg,
9200                         best_block,
9201                         &&keys_provider,
9202                         &&logger,
9203                 ).map_err(|_| ()).unwrap();
9204                 let node_b_updates = node_b_chan.monitor_updating_restored(
9205                         &&logger,
9206                         &&keys_provider,
9207                         chain_hash,
9208                         &config,
9209                         0,
9210                 );
9211
9212                 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9213                 // broadcasting the funding transaction until the batch is ready.
9214                 let _ = node_a_chan.funding_signed(
9215                         &funding_signed_msg,
9216                         best_block,
9217                         &&keys_provider,
9218                         &&logger,
9219                 ).unwrap();
9220                 let node_a_updates = node_a_chan.monitor_updating_restored(
9221                         &&logger,
9222                         &&keys_provider,
9223                         chain_hash,
9224                         &config,
9225                         0,
9226                 );
9227                 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9228                 // as the funding transaction depends on all channels in the batch becoming ready.
9229                 assert!(node_a_updates.channel_ready.is_none());
9230                 assert!(node_a_updates.funding_broadcastable.is_none());
9231                 assert_eq!(
9232                         node_a_chan.context.channel_state,
9233                         ChannelState::FundingSent as u32 |
9234                         ChannelState::WaitingForBatch as u32,
9235                 );
9236
9237                 // It is possible to receive a 0conf channel_ready from the remote node.
9238                 node_a_chan.channel_ready(
9239                         &node_b_updates.channel_ready.unwrap(),
9240                         &&keys_provider,
9241                         chain_hash,
9242                         &config,
9243                         &best_block,
9244                         &&logger,
9245                 ).unwrap();
9246                 assert_eq!(
9247                         node_a_chan.context.channel_state,
9248                         ChannelState::FundingSent as u32 |
9249                         ChannelState::WaitingForBatch as u32 |
9250                         ChannelState::TheirChannelReady as u32,
9251                 );
9252
9253                 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9254                 node_a_chan.set_batch_ready();
9255                 assert_eq!(
9256                         node_a_chan.context.channel_state,
9257                         ChannelState::FundingSent as u32 |
9258                         ChannelState::TheirChannelReady as u32,
9259                 );
9260                 assert!(node_a_chan.check_get_channel_ready(0).is_some());
9261         }
9262 }