]> git.bitcoin.ninja Git - rust-lightning/blob - lightning/src/ln/channel.rs
7b14573a1a49cf227f0ff46fbd261f4f156e49af
[rust-lightning] / lightning / src / ln / channel.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 use bitcoin::blockdata::script::{Script,Builder};
11 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
12 use bitcoin::util::sighash;
13 use bitcoin::consensus::encode;
14
15 use bitcoin::hashes::Hash;
16 use bitcoin::hashes::sha256::Hash as Sha256;
17 use bitcoin::hashes::sha256d::Hash as Sha256d;
18 use bitcoin::hash_types::{Txid, BlockHash};
19
20 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
21 use bitcoin::secp256k1::{PublicKey,SecretKey};
22 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
23 use bitcoin::secp256k1;
24
25 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
26 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
27 use crate::ln::msgs;
28 use crate::ln::msgs::DecodeError;
29 use crate::ln::script::{self, ShutdownScript};
30 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
31 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
32 use crate::ln::chan_utils;
33 use crate::ln::onion_utils::HTLCFailReason;
34 use crate::chain::BestBlock;
35 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
36 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
37 use crate::chain::transaction::{OutPoint, TransactionData};
38 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
39 use crate::events::ClosureReason;
40 use crate::routing::gossip::NodeId;
41 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
42 use crate::util::logger::Logger;
43 use crate::util::errors::APIError;
44 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
45 use crate::util::scid_utils::scid_from_parts;
46
47 use crate::io;
48 use crate::prelude::*;
49 use core::{cmp,mem,fmt};
50 use core::ops::Deref;
51 #[cfg(any(test, fuzzing, debug_assertions))]
52 use crate::sync::Mutex;
53 use bitcoin::hashes::hex::ToHex;
54 use crate::sign::type_resolver::ChannelSignerType;
55
/// A snapshot of a channel's balance- and limit-related fields, exposed so functional tests can
/// assert on the channel's internal accounting. All `_msat` fields are in milli-satoshis.
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	// Value of outbound HTLC updates currently buffered in the holding cell.
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
67
/// Balances of a channel from our perspective. All values are in milli-satoshis.
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
80
/// The state of a pending `update_fee`, mirroring the HTLC state machine where applicable.
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	/// Mirrors `InboundHTLCState::RemoteAnnounced`.
	RemoteAnnounced,
	/// Mirrors `InboundHTLCState::AwaitingRemoteRevokeToAnnounce`.
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	/// An `update_fee` we sent; outbound state can only be `LocalAnnounced` or `Committed`.
	Outbound,
}
95
/// The reason an inbound HTLC is being removed from the channel, recorded while we wait for the
/// removal to become irrevocable (see `InboundHTLCState::LocalRemoved`).
enum InboundHTLCRemovalReason {
	/// We're failing the HTLC backwards, carrying the onion error packet to relay.
	FailRelay(msgs::OnionErrorPacket),
	/// We're failing the HTLC as malformed; presumably (sha256_of_onion, failure_code) per the
	/// `update_fail_malformed_htlc` message — confirm at the send site.
	FailMalformed(([u8; 32], u16)),
	/// We're fulfilling the HTLC with this payment preimage.
	Fulfill(PaymentPreimage),
}
101
/// The lifecycle state of an inbound HTLC (one our counterparty offered to us).
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	/// The remote has revoked their prior state: this HTLC is now an irrevocable part of both
	/// sides' channel state and may be forwarded and/or removed.
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
155
/// An inbound HTLC, i.e. one our counterparty offered to us, together with its state machine.
struct InboundHTLCOutput {
	/// The HTLC's identifier within this channel.
	htlc_id: u64,
	/// The HTLC's value, in milli-satoshis.
	amount_msat: u64,
	/// The block height at which this HTLC expires.
	cltv_expiry: u32,
	/// The hash whose preimage releases the HTLC's value.
	payment_hash: PaymentHash,
	/// Where this HTLC currently is in its lifecycle; see `InboundHTLCState`.
	state: InboundHTLCState,
}
163
/// The lifecycle state of an outbound HTLC (one we offered to our counterparty).
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	/// The remote has revoked the state which did not contain this HTLC: it is now an
	/// irrevocable part of both sides' channel state.
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
195
/// The outcome of a removed outbound HTLC: either the recipient claimed it or it failed back.
#[derive(Clone)]
enum OutboundHTLCOutcome {
	/// The HTLC was fulfilled.
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	/// The HTLC failed, carrying the reason used to fail it backwards.
	Failure(HTLCFailReason),
}
202
203 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
204         fn from(o: Option<HTLCFailReason>) -> Self {
205                 match o {
206                         None => OutboundHTLCOutcome::Success(None),
207                         Some(r) => OutboundHTLCOutcome::Failure(r)
208                 }
209         }
210 }
211
212 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
213         fn into(self) -> Option<&'a HTLCFailReason> {
214                 match self {
215                         OutboundHTLCOutcome::Success(_) => None,
216                         OutboundHTLCOutcome::Failure(ref r) => Some(r)
217                 }
218         }
219 }
220
/// An outbound HTLC, i.e. one we offered to our counterparty, together with its state machine.
struct OutboundHTLCOutput {
	/// The HTLC's identifier within this channel.
	htlc_id: u64,
	/// The HTLC's value, in milli-satoshis.
	amount_msat: u64,
	/// The block height at which this HTLC expires.
	cltv_expiry: u32,
	/// The hash whose preimage releases the HTLC's value.
	payment_hash: PaymentHash,
	/// Where this HTLC currently is in its lifecycle; see `OutboundHTLCState`.
	state: OutboundHTLCState,
	/// Where this HTLC came from, so we can fail/claim backwards upon resolution.
	source: HTLCSource,
	/// The extra fee we're skimming off the top of this HTLC.
	skimmed_fee_msat: Option<u64>,
}
230
/// An update to send to our counterparty which is currently buffered in the holding cell because
/// we are awaiting their revoke_and_ack.
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	/// A new HTLC to offer our counterparty.
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	/// A claim of an inbound HTLC for which we hold the preimage.
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	/// A failure of an inbound HTLC, carrying the error packet to relay backwards.
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
252
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
///
/// All discriminants are written as `1 << n` so it is immediately visible that each state/flag
/// occupies a distinct bit in the combined `u32` state value.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 1 << 2,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 1 << 3,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	/// Both sides have exchanged `channel_ready`; the channel is usable for payments.
	ChannelReady = 1 << 6,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 1 << 12,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
/// The bits which are set once both sides have sent their shutdown message.
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
/// The flag bits which may be set on top of multiple different base states.
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
/// Every bit which acts as a flag rather than a standalone base state.
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
323
/// The commitment number assigned to the first commitment transaction (the largest 48-bit value).
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// The default maximum number of concurrently-pending HTLCs we will allow on a channel.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
327
328 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
329         const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
330         const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
331         if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
332 }
333
/// The incremental commitment-transaction weight added per non-dust HTLC output. Made `pub` in
/// test builds so functional tests can compute expected commitment fees.
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

/// The value of each anchor output, in satoshis.
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
340
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

/// The minimum channel reserve we will require our counterparty to maintain: just a reasonable
/// implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
376
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	/// An error which can be ignored; the channel is still usable.
	Ignore(String),
	/// An error which should be surfaced to the peer as a warning, without closing the channel.
	Warn(String),
	/// An error which requires closing the channel.
	Close(String),
}
385
386 impl fmt::Debug for ChannelError {
387         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
388                 match self {
389                         &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
390                         &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
391                         &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
392                 }
393         }
394 }
395
396 impl fmt::Display for ChannelError {
397         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
398                 match self {
399                         &ChannelError::Ignore(ref e) => write!(f, "{}", e),
400                         &ChannelError::Warn(ref e) => write!(f, "{}", e),
401                         &ChannelError::Close(ref e) => write!(f, "{}", e),
402                 }
403         }
404 }
405
/// Evaluates a secp256k1 `Result`, yielding the `Ok` value on success and otherwise
/// early-returning `Err(ChannelError::Close($err))` from the enclosing function.
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
414
415 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
416 /// our counterparty or not. However, we don't want to announce updates right away to avoid
417 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
418 /// our channel_update message and track the current state here.
419 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
420 #[derive(Clone, Copy, PartialEq)]
421 pub(super) enum ChannelUpdateStatus {
422         /// We've announced the channel as enabled and are connected to our peer.
423         Enabled,
424         /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
425         DisabledStaged(u8),
426         /// Our channel is live again, but we haven't announced the channel as enabled yet.
427         EnabledStaged(u8),
428         /// We've announced the channel as disabled.
429         Disabled,
430 }
431
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
/// Used to decide whether an `AnnouncementSignatures` must be re-sent after a reconnect.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
451
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	/// We offered the HTLC (it is outbound from our perspective).
	LocalOffered,
	/// Our counterparty offered the HTLC (it is inbound from our perspective).
	RemoteOffered,
}
457
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	// Number of pending HTLCs.
	pending_htlcs: u32,
	// Total value of pending HTLCs, in msat.
	pending_htlcs_value_msat: u64,
	// Value of HTLCs which are dust on the counterparty's commitment transaction, in msat.
	on_counterparty_tx_dust_exposure_msat: u64,
	// Value of HTLCs which are dust on our own commitment transaction, in msat.
	on_holder_tx_dust_exposure_msat: u64,
	// Value of HTLC updates sitting in the holding cell, in msat.
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
467
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
479
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	/// The value of the candidate HTLC, in msat.
	amount_msat: u64,
	/// Which side would offer the candidate HTLC.
	origin: HTLCInitiator,
}
485
486 impl HTLCCandidate {
487         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
488                 Self {
489                         amount_msat,
490                         origin,
491                 }
492         }
493 }
494
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	/// The claim is new; carries the resulting monitor update, the value of the claimed HTLC,
	/// and the update_fulfill message to send (if any).
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	/// The claim is duplicative; see `UpdateFulfillCommitFetch::DuplicateClaim`.
	DuplicateClaim {},
}
505
/// The return type of get_update_fulfill_htlc_and_commit, indicating whether the requested claim
/// was new or duplicative.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
521
/// The return value of `monitor_updating_restored`: the messages and actions to resume once a
/// pending `ChannelMonitor` update has completed.
pub(super) struct MonitorRestoreUpdates {
	/// A `revoke_and_ack` to send, if any.
	pub raa: Option<msgs::RevokeAndACK>,
	/// A commitment update to send, if any.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// Whether the RAA or the commitment update should be sent first.
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	/// A funding transaction whose broadcast was pending on the monitor update, if any.
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
534
/// The return value of `channel_reestablish`: the messages to send our peer to resume the
/// channel after a reconnection.
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	/// A `revoke_and_ack` to re-send, if any.
	pub raa: Option<msgs::RevokeAndACK>,
	/// A commitment update to re-send, if any.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// Whether the RAA or the commitment update should be sent first.
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	/// Our shutdown message to re-send, if we had previously sent one.
	pub shutdown_msg: Option<msgs::Shutdown>,
}
544
/// The return type of `force_shutdown`
///
/// Contains a tuple with the following:
/// - An optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
/// - A list of HTLCs to fail back in the form of the (source, payment hash, and this channel's
///   counterparty_node_id and channel_id).
/// - An optional transaction id identifying a corresponding batch funding transaction.
pub(crate) type ShutdownResult = (
	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	Option<Txid>
);
557
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
604
605 /// The number of ticks that may elapse while we're waiting for a response to a
606 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
607 /// them.
608 ///
609 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
610 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
611
612 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
613 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
614 /// exceeding this age limit will be force-closed and purged from memory.
615 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
616
617 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
618 pub(crate) const COINBASE_MATURITY: u32 = 100;
619
/// A [`ChannelMonitorUpdate`] which we cannot yet release to the `ChannelManager`, stashed in
/// `ChannelContext::blocked_monitor_updates` until some external action completes.
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

// (De)serialize the wrapped update as TLV type 0 so blocked updates survive restarts.
impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
627
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	/// An outbound (V1-established) channel which has not yet been funded.
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	/// An inbound (V1-established) channel which has not yet been funded.
	UnfundedInboundV1(InboundV1Channel<SP>),
	/// A channel for which funding has been negotiated.
	Funded(Channel<SP>),
}
635
636 impl<'a, SP: Deref> ChannelPhase<SP> where
637         SP::Target: SignerProvider,
638         <SP::Target as SignerProvider>::Signer: ChannelSigner,
639 {
640         pub fn context(&'a self) -> &'a ChannelContext<SP> {
641                 match self {
642                         ChannelPhase::Funded(chan) => &chan.context,
643                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
644                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
645                 }
646         }
647
648         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
649                 match self {
650                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
651                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
652                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
653                 }
654         }
655 }
656
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel's peer has yet to respond after the counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
667
668 impl UnfundedChannelContext {
669         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
670         /// having reached the unfunded channel age limit.
671         ///
672         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
673         pub fn should_expire_unfunded_channel(&mut self) -> bool {
674                 self.unfunded_channel_age_ticks += 1;
675                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
676         }
677 }
678
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	/// The channel's configuration, including any counterparty-visible forwarding parameters.
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	// Handshake limits applied to an inbound channel, overriding our defaults if set.
	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	// An opaque, user-provided channel identifier; has no meaning to LDK itself.
	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	// A bitfield of `ChannelState` values describing where we are in the channel's life.
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// many tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	// The total value of the channel, in satoshis.
	channel_value_satoshis: u64,

	// The `update_id` to place on the next `ChannelMonitorUpdate` we generate.
	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	// Some(()) if this channel's funding is part of a batch funding transaction — presence is
	// used as a flag; see also the batch txid in `ShutdownResult`. TODO(review): confirm.
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
942
943 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
	/// Returns the counter set on our latest `channel_update` message for this channel.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}
948
	/// Returns the ID of the latest [`ChannelMonitorUpdate`] tracked for this channel.
	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}
952
	/// Returns true if this channel is configured to be publicly announced
	/// (`announced_channel` in our channel config).
	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}
956
	/// Returns true if this channel is outbound from us (i.e. we are the channel initiator),
	/// per the negotiated channel transaction parameters.
	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}
960
	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// (our configured `forwarding_fee_base_msat`).
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}
966
	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		// Mask off the transient state flags: any state strictly beyond `OurInitSent` means the
		// counterparty has sent us at least one message.
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}
971
972         /// Returns true if this channel is fully established and not known to be closing.
973         /// Allowed in any state (including after shutdown)
974         pub fn is_usable(&self) -> bool {
975                 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
976                 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
977         }
978
979         /// shutdown state returns the state of the channel in its various stages of shutdown
980         pub fn shutdown_state(&self) -> ChannelShutdownState {
981                 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
982                         return ChannelShutdownState::ShutdownComplete;
983                 }
984                 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 &&  self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
985                         return ChannelShutdownState::ShutdownInitiated;
986                 }
987                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
988                         return ChannelShutdownState::ResolvingHTLCs;
989                 }
990                 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
991                         return ChannelShutdownState::NegotiatingClosingFee;
992                 }
993                 return ChannelShutdownState::NotShuttingDown;
994         }
995
996         fn closing_negotiation_ready(&self) -> bool {
997                 self.pending_inbound_htlcs.is_empty() &&
998                 self.pending_outbound_htlcs.is_empty() &&
999                 self.pending_update_fee.is_none() &&
1000                 self.channel_state &
1001                 (BOTH_SIDES_SHUTDOWN_MASK |
1002                         ChannelState::AwaitingRemoteRevoke as u32 |
1003                         ChannelState::PeerDisconnected as u32 |
1004                         ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1005         }
1006
	/// Returns true if this channel is currently available for use. This is a superset of
	/// is_usable() and considers things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		// Usable, and the peer is currently connected.
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}
1013
1014         // Public utilities:
1015
	/// Returns this channel's current ID.
	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}
1019
	/// Returns the `temporary_channel_id` used during channel establishment.
	///
	/// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}
1026
	/// Returns the minimum confirmation depth required of the funding transaction, if set.
	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}
1030
1031         /// Gets the "user_id" value passed into the construction of this channel. It has no special
1032         /// meaning and exists only to allow users to have a persistent identifier of a channel.
1033         pub fn get_user_id(&self) -> u128 {
1034                 self.user_id
1035         }
1036
	/// Gets the channel's [`ChannelTypeFeatures`], as negotiated during channel open.
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}
1041
	/// Gets the channel's `short_channel_id`.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}
1048
	/// Returns the most recent SCID alias our counterparty offered us for this channel, if any.
	///
	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}
1053
	/// Returns the SCID alias we offered our counterparty for this channel.
	///
	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}
1058
	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		// An alias must never be overwritten - 0 is the "unset" placeholder.
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
1066
	/// Returns the funding_txo we either got from our peer, or were given by
	/// `get_funding_created`. `None` until funding has been negotiated.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}
1072
	/// Returns the hash of the block in which our funding transaction was confirmed, if known.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}
1077
1078         /// Returns the current number of confirmations on the funding transaction.
1079         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1080                 if self.funding_tx_confirmation_height == 0 {
1081                         // We either haven't seen any confirmation yet, or observed a reorg.
1082                         return 0;
1083                 }
1084
1085                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1086         }
1087
	/// Returns the contest delay we selected, from the channel transaction parameters.
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}
1091
	/// Returns our set of channel public keys, from the channel transaction parameters.
	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}
1095
	/// Returns the contest delay our counterparty selected, if their channel parameters have
	/// been set.
	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}
1100
	/// Returns the counterparty's channel public keys.
	///
	/// Panics if the counterparty's channel parameters have not yet been set - the `unwrap`
	/// enforces that callers only use this once they are.
	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}
1104
	/// Returns the node id of our channel counterparty.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}
1109
	/// Returns our `htlc_minimum_msat` for this channel.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}
1114
	/// Returns our maximum in-flight HTLC value, in msat, bounded by the channel's spendable
	/// balance.
	///
	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}
1119
1120         /// Allowed in any state (including after shutdown)
1121         pub fn get_announced_htlc_max_msat(&self) -> u64 {
1122                 return cmp::min(
1123                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1124                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1125                         // channel might have been used to route very small values (either by honest users or as DoS).
1126                         self.channel_value_satoshis * 1000 * 9 / 10,
1127
1128                         self.counterparty_max_htlc_value_in_flight_msat
1129                 );
1130         }
1131
	/// Returns the counterparty's `htlc_minimum_msat` for this channel.
	///
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}
1136
	/// Returns the counterparty's maximum in-flight HTLC value, in msat, bounded by the
	/// channel's spendable balance.
	///
	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}
1141
1142         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1143                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1144                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1145                         cmp::min(
1146                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1147                                 party_max_htlc_value_in_flight_msat
1148                         )
1149                 })
1150         }
1151
	/// Returns the total value of the channel, in satoshis.
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}
1155
	/// Returns our configured `forwarding_fee_proportional_millionths` for this channel.
	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}
1159
	/// Returns our configured `cltv_expiry_delta`, clamped to be at least
	/// [`MIN_CLTV_EXPIRY_DELTA`].
	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}
1163
1164         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1165                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1166         where F::Target: FeeEstimator
1167         {
1168                 match self.config.options.max_dust_htlc_exposure {
1169                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1170                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1171                                         ConfirmationTarget::HighPriority);
1172                                 feerate_per_kw as u64 * multiplier
1173                         },
1174                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1175                 }
1176         }
1177
	/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
	pub fn prev_config(&self) -> Option<ChannelConfig> {
		// The stored tuple is (config, ticks-since-replaced); only the config is exposed.
		self.prev_config.map(|prev_config| prev_config.0)
	}
1182
	// Checks whether we should emit a `ChannelPending` event: the funding transaction must have
	// been broadcast (see `is_funding_broadcast`) and the event must not already have been emitted.
	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
		self.is_funding_broadcast() && !self.channel_pending_event_emitted
	}
1187
	// Returns whether we already emitted a `ChannelPending` event.
	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
		self.channel_pending_event_emitted
	}
1192
	// Remembers that we already emitted a `ChannelPending` event, so it is never emitted twice.
	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
		self.channel_pending_event_emitted = true;
	}
1197
	// Checks whether we should emit a `ChannelReady` event: the channel must be usable and the
	// event must not already have been emitted.
	pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
		self.is_usable() && !self.channel_ready_event_emitted
	}
1202
	// Remembers that we already emitted a `ChannelReady` event, so it is never emitted twice.
	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
		self.channel_ready_event_emitted = true;
	}
1207
1208         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1209         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1210         /// no longer be considered when forwarding HTLCs.
1211         pub fn maybe_expire_prev_config(&mut self) {
1212                 if self.prev_config.is_none() {
1213                         return;
1214                 }
1215                 let prev_config = self.prev_config.as_mut().unwrap();
1216                 prev_config.1 += 1;
1217                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1218                         self.prev_config = None;
1219                 }
1220         }
1221
	/// Returns the current [`ChannelConfig`] applied to the channel.
	pub fn config(&self) -> ChannelConfig {
		self.config.options
	}
1226
1227         /// Updates the channel's config. A bool is returned indicating whether the config update
1228         /// applied resulted in a new ChannelUpdate message.
1229         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1230                 let did_channel_update =
1231                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1232                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1233                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1234                 if did_channel_update {
1235                         self.prev_config = Some((self.config.options, 0));
1236                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1237                         // policy change to propagate throughout the network.
1238                         self.update_time_counter += 1;
1239                 }
1240                 self.config.options = *config;
1241                 did_channel_update
1242         }
1243
	/// Returns true if funding_signed was sent/received and the
	/// funding transaction has been broadcast if necessary.
	pub fn is_funding_broadcast(&self) -> bool {
		// We must have reached at least `FundingSent` (masking off the transient STATE_FLAGS
		// bits before comparing), and for batch-funded channels the `WaitingForBatch` flag
		// must have been cleared (i.e. the whole batch is ready to broadcast).
		self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
			self.channel_state & ChannelState::WaitingForBatch as u32 == 0
	}
1250
	/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
	/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
	/// the transaction. Thus, b will generally be sending a signature over such a transaction to
	/// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to a).
	/// @local is used only to convert relevant internal structures which refer to remote vs local
	/// to decide value of outputs and direction of HTLCs.
	/// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
	/// state may indicate that one peer has informed the other that they'd like to add an HTLC but
	/// have not yet committed it. Such HTLCs will only be included in transactions which are being
	/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
	/// which peer generated this transaction and "to whom" this transaction flows.
	#[inline]
	fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
		where L::Target: Logger
	{
		let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
		let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
		let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);

		// Dust is judged against the broadcaster's dust limit, since the broadcaster is the
		// party whose transaction these outputs would appear in.
		let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
		let mut remote_htlc_total_msat = 0;
		let mut local_htlc_total_msat = 0;
		let mut value_to_self_msat_offset = 0;

		// Select the feerate: a pending update_fee only applies if its state matches the
		// transaction being built (mirrors the HTLC inclusion criteria below).
		let mut feerate_per_kw = self.feerate_per_kw;
		if let Some((feerate, update_state)) = self.pending_update_fee {
			if match update_state {
				// Note that these match the inclusion criteria when scanning
				// pending_inbound_htlcs below.
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
				FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
			} {
				feerate_per_kw = feerate;
			}
		}

		log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
			commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
			get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
			&self.channel_id,
			if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);

		// Builds an HTLCOutputInCommitment (with no output index yet) from a pending HTLC.
		macro_rules! get_htlc_in_commitment {
			($htlc: expr, $offered: expr) => {
				HTLCOutputInCommitment {
					offered: $offered,
					amount_msat: $htlc.amount_msat,
					cltv_expiry: $htlc.cltv_expiry,
					payment_hash: $htlc.payment_hash,
					transaction_output_index: None
				}
			}
		}

		// Sorts an HTLC into the non-dust or dust bucket. An HTLC is dust if, after paying
		// the second-stage (timeout/success) transaction fee — zero with anchors — its value
		// would fall below the broadcaster's dust limit.
		macro_rules! add_htlc_output {
			($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
				if $outbound == local { // "offered HTLC output"
					let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				} else {
					let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
						feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
					};
					if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
						log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_non_dust_htlcs.push((htlc_in_tx, $source));
					} else {
						log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
						included_dust_htlcs.push((htlc_in_tx, $source));
					}
				}
			}
		}

		// Scan inbound HTLCs: include those whose state applies to this transaction; for
		// excluded HTLCs we've already fulfilled locally, credit their value back to us.
		for ref htlc in self.pending_inbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
				InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
				InboundHTLCState::Committed => (true, "Committed"),
				InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
			};

			if include {
				add_htlc_output!(htlc, false, None, state_name);
				remote_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match &htlc.state {
					&InboundHTLCState::LocalRemoved(ref reason) => {
						if generated_by_local {
							if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
								value_to_self_msat_offset += htlc.amount_msat as i64;
							}
						}
					},
					_ => {},
				}
			}
		}

		let mut preimages: Vec<PaymentPreimage> = Vec::new();

		// Scan outbound HTLCs: include those whose state applies; collect any preimages the
		// counterparty has revealed; for excluded HTLCs the counterparty has claimed, debit
		// their value from us.
		for ref htlc in self.pending_outbound_htlcs.iter() {
			let (include, state_name) = match htlc.state {
				OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
				OutboundHTLCState::Committed => (true, "Committed"),
				OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
			};

			let preimage_opt = match htlc.state {
				OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
				OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
				_ => None,
			};

			if let Some(preimage) = preimage_opt {
				preimages.push(preimage);
			}

			if include {
				add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
				local_htlc_total_msat += htlc.amount_msat;
			} else {
				log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
				match htlc.state {
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
						value_to_self_msat_offset -= htlc.amount_msat as i64;
					},
					OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
						if !generated_by_local {
							value_to_self_msat_offset -= htlc.amount_msat as i64;
						}
					},
					_ => {},
				}
			}
		}

		// Compute each side's balance: our base balance minus in-flight outbound HTLCs, plus
		// the fulfill/claim offset accumulated above; the remote side gets the remainder.
		let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
		assert!(value_to_self_msat >= 0);
		// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
		// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
		// "violate" their reserve value by counting those against it. Thus, we have to convert
		// everything to i64 before subtracting as otherwise we can overflow.
		let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
		assert!(value_to_remote_msat >= 0);

		#[cfg(debug_assertions)]
		{
			// Make sure that the to_self/to_remote is always either past the appropriate
			// channel_reserve *or* it is making progress towards it.
			let mut broadcaster_max_commitment_tx_output = if generated_by_local {
				self.holder_max_commitment_tx_output.lock().unwrap()
			} else {
				self.counterparty_max_commitment_tx_output.lock().unwrap()
			};
			debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
			broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
			debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
			broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
		}

		// The channel funder pays the commitment-transaction fee and (with anchors) both
		// anchor outputs out of their balance.
		let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
		let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
		let (value_to_self, value_to_remote) = if self.is_outbound() {
			(value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
		} else {
			(value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
		};

		// Map holder/counterparty balances onto broadcaster ("a") / non-broadcaster ("b").
		let mut value_to_a = if local { value_to_self } else { value_to_remote };
		let mut value_to_b = if local { value_to_remote } else { value_to_self };
		let (funding_pubkey_a, funding_pubkey_b) = if local {
			(self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
		} else {
			(self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
		};

		// Trim either main output to zero if it falls below the broadcaster's dust limit.
		if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
		} else {
			value_to_a = 0;
		}

		if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
			log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
		} else {
			value_to_b = 0;
		}

		let num_nondust_htlcs = included_non_dust_htlcs.len();

		let channel_parameters =
			if local { self.channel_transaction_parameters.as_holder_broadcastable() }
			else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
		let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
		                                                             value_to_a as u64,
		                                                             value_to_b as u64,
		                                                             funding_pubkey_a,
		                                                             funding_pubkey_b,
		                                                             keys.clone(),
		                                                             feerate_per_kw,
		                                                             &mut included_non_dust_htlcs,
		                                                             &channel_parameters
		);
		// Return the HTLCs sorted by commitment output index, with the dust HTLCs (which have
		// no output) appended at the end.
		let mut htlcs_included = included_non_dust_htlcs;
		// The unwrap is safe, because all non-dust HTLCs have been assigned an output index
		htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
		htlcs_included.append(&mut included_dust_htlcs);

		// For the stats, trimmed-to-0 the value in msats accordingly
		// NOTE(review): this compares msat * 1000 against a satoshi limit, which is almost
		// never true for realistic values — the intended trim check looks like it should be
		// `value < broadcaster_dust_limit_satoshis * 1000` (msat vs msat). Confirm upstream.
		value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
		value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };

		CommitmentStats {
			tx,
			feerate_per_kw,
			total_fee_sat,
			num_nondust_htlcs,
			htlcs_included,
			local_balance_msat: value_to_self_msat as u64,
			remote_balance_msat: value_to_remote_msat as u64,
			preimages
		}
	}
1497
1498         #[inline]
1499         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1500         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1501         /// our counterparty!)
1502         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1503         /// TODO Some magic rust shit to compile-time check this?
1504         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1505                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1506                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1507                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1508                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1509
1510                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1511         }
1512
1513         #[inline]
1514         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1515         /// will sign and send to our counterparty.
1516         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1517         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1518                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1519                 //may see payments to it!
1520                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1521                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1522                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1523
1524                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1525         }
1526
	/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
	/// pays to get_funding_redeemscript().to_v0_p2wsh()).
	/// Panics if called before accept_channel/InboundV1Channel::new
	pub fn get_funding_redeemscript(&self) -> Script {
		make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
	}
1533
	/// Returns the counterparty's funding pubkey (one of the two keys in the funding redeemscript).
	fn counterparty_funding_pubkey(&self) -> &PublicKey {
		&self.get_counterparty_pubkeys().funding_pubkey
	}
1537
	/// Returns the channel's current commitment-transaction feerate, in satoshis per 1000 weight.
	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
		self.feerate_per_kw
	}
1541
1542         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1543                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1544                 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1545                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1546                 // more dust balance if the feerate increases when we have several HTLCs pending
1547                 // which are near the dust limit.
1548                 let mut feerate_per_kw = self.feerate_per_kw;
1549                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1550                 // potential feerate updates coming soon.
1551                 if let Some((feerate, _)) = self.pending_update_fee {
1552                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1553                 }
1554                 if let Some(feerate) = outbound_feerate_update {
1555                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1556                 }
1557                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1558         }
1559
	/// Get forwarding information for the counterparty.
	pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
		self.counterparty_forwarding_info.clone()
	}
1564
1565         /// Returns a HTLCStats about inbound pending htlcs
1566         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1567                 let context = self;
1568                 let mut stats = HTLCStats {
1569                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1570                         pending_htlcs_value_msat: 0,
1571                         on_counterparty_tx_dust_exposure_msat: 0,
1572                         on_holder_tx_dust_exposure_msat: 0,
1573                         holding_cell_msat: 0,
1574                         on_holder_tx_holding_cell_htlcs_count: 0,
1575                 };
1576
1577                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1578                         (0, 0)
1579                 } else {
1580                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1581                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1582                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1583                 };
1584                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1585                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1586                 for ref htlc in context.pending_inbound_htlcs.iter() {
1587                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1588                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1589                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1590                         }
1591                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1592                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1593                         }
1594                 }
1595                 stats
1596         }
1597
1598         /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1599         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1600                 let context = self;
1601                 let mut stats = HTLCStats {
1602                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1603                         pending_htlcs_value_msat: 0,
1604                         on_counterparty_tx_dust_exposure_msat: 0,
1605                         on_holder_tx_dust_exposure_msat: 0,
1606                         holding_cell_msat: 0,
1607                         on_holder_tx_holding_cell_htlcs_count: 0,
1608                 };
1609
1610                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1611                         (0, 0)
1612                 } else {
1613                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1614                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1615                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1616                 };
1617                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1618                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1619                 for ref htlc in context.pending_outbound_htlcs.iter() {
1620                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1621                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1622                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1623                         }
1624                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1625                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1626                         }
1627                 }
1628
1629                 for update in context.holding_cell_htlc_updates.iter() {
1630                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1631                                 stats.pending_htlcs += 1;
1632                                 stats.pending_htlcs_value_msat += amount_msat;
1633                                 stats.holding_cell_msat += amount_msat;
1634                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1635                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1636                                 }
1637                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1638                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1639                                 } else {
1640                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1641                                 }
1642                         }
1643                 }
1644                 stats
1645         }
1646
	/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
	/// Doesn't bother handling the
	/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
	/// corner case properly.
	pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
	-> AvailableBalances
	where F::Target: FeeEstimator
	{
		let context = &self;
		// Note that we have to handle overflow due to the above case.
		let inbound_stats = context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = context.get_outbound_pending_htlc_stats(None);

		// Our claimable balance: our current commitment balance, plus inbound HTLCs we've
		// already claimed (LocalRemoved/Fulfill) but which aren't irrevocably removed yet,
		// minus all pending outbound HTLC value (which includes holding-cell adds).
		let mut balance_msat = context.value_to_self_msat;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
				balance_msat += htlc.amount_msat;
			}
		}
		balance_msat -= outbound_stats.pending_htlcs_value_msat;

		// Raw capacity for further sends: balance less in-flight outbound value, less the
		// reserve the counterparty requires us to maintain (0 until they've told us one).
		let outbound_capacity_msat = context.value_to_self_msat
				.saturating_sub(outbound_stats.pending_htlcs_value_msat)
				.saturating_sub(
					context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);

		let mut available_capacity_msat = outbound_capacity_msat;

		if context.is_outbound() {
			// We should mind channel commit tx fee when computing how much of the available capacity
			// can be used in the next htlc. Mirrors the logic in send_htlc.
			//
			// The fee depends on whether the amount we will be sending is above dust or not,
			// and the answer will in turn change the amount itself — making it a circular
			// dependency.
			// This complicates the computation around dust-values, up to the one-htlc-value.
			let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
			}

			// Compute the reserved commit tx fee for both outcomes: the next HTLC being non-dust
			// (adds an output, plus the fee-spike-buffer HTLC) and dust (buffer HTLC only).
			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
			let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
			let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
			let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));

			// We will first subtract the fee as if we were above-dust. Then, if the resulting
			// value ends up being below dust, we have this fee available again. In that case,
			// match the value to right-below-dust.
			let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
			if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
				let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
				debug_assert!(one_htlc_difference_msat != 0);
				capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
				capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
				available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
			} else {
				available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
			}
		} else {
			// If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
			// sending a new HTLC won't reduce their balance below our reserve threshold.
			let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
			if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
			}

			let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
			let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);

			let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
			let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
				.saturating_sub(inbound_stats.pending_htlcs_value_msat);

			if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
				// If another HTLC's fee would reduce the remote's balance below the reserve limit
				// we've selected for them, we can only send dust HTLCs.
				available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
			}
		}

		let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;

		// If we get close to our maximum dust exposure, we end up in a situation where we can send
		// between zero and the remaining dust exposure limit remaining OR above the dust limit.
		// Because we cannot express this as a simple min/max, we prefer to tell the user they can
		// send above the dust limit (as the router can always overpay to meet the dust limit).
		let mut remaining_msat_below_dust_exposure_limit = None;
		let mut dust_exposure_dust_limit_msat = 0;
		let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);

		// Effective dust thresholds on each commitment tx; on non-anchor channels these include
		// the (buffered-feerate) second-stage HTLC tx fee for the relevant claim path.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
		} else {
			let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
			(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			 context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// If even a maximally-sized dust HTLC would push us past the configured exposure cap on
		// the counterparty's commitment tx, record the remaining dust budget.
		let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
			remaining_msat_below_dust_exposure_limit =
				Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
		}

		// Same check for our own commitment tx; keep the tighter of the two remaining budgets.
		let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
			remaining_msat_below_dust_exposure_limit = Some(cmp::min(
				remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
				max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
		}

		if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
			if available_capacity_msat < dust_exposure_dust_limit_msat {
				// We can only send dust HTLCs, so cap capacity at the remaining dust budget.
				available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
			} else {
				// We can still send non-dust HTLCs; require the next send to be above-dust.
				next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
			}
		}

		// Finally, respect the counterparty's limits on in-flight HTLC value and count.
		available_capacity_msat = cmp::min(available_capacity_msat,
			context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);

		if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
			available_capacity_msat = 0;
		}

		AvailableBalances {
			inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
					- context.value_to_self_msat as i64
					- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
					- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
				0) as u64,
			outbound_capacity_msat,
			next_outbound_htlc_limit_msat: available_capacity_msat,
			next_outbound_htlc_minimum_msat,
			balance_msat,
		}
	}
1787
1788         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1789                 let context = &self;
1790                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1791         }
1792
	/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
	/// number of pending HTLCs that are on track to be in our next commitment tx.
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Only the channel funder pays commitment tx fees, so this is only meaningful on
		// channels we opened.
		assert!(context.is_outbound());

		// Second-stage HTLC txs carry no fee on anchors-zero-fee-HTLC channels; otherwise the
		// (timeout/success) tx fee raises the effective dust threshold for each claim path.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// On *our* commitment tx: inbound HTLCs are claimed via success, outbound via timeout.
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;

		// Extra HTLCs requested by the caller: the optional fee-spike buffer, plus the
		// candidate itself if it would be non-dust on our commitment tx.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			}
		}

		// Count non-dust HTLCs expected to appear in our next commitment tx.
		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
				continue
			}
			// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
			// transaction including this HTLC if it times out before they RAA.
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
				continue
			}
			match htlc.state {
				OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
				// transaction won't be generated until they send us their next RAA, which will mean
				// dropping any HTLCs in this state.
				_ => {},
			}
		}

		// Non-dust adds parked in our holding cell will be on our next commitment tx too.
		for htlc in context.holding_cell_htlc_updates.iter() {
			match htlc {
				&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
					if amount_msat / 1000 < real_dust_limit_timeout_sat {
						continue
					}
					included_htlcs += 1
				},
				_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
					 // ack we're guaranteed to never include them in commitment txs anymore.
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// In test/fuzz builds, cache the inputs of this computation (with the buffer HTLC
		// excluded from the fee) so later commitment construction can cross-check it.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
				+ context.holding_cell_htlc_updates.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1896
	/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
	/// pending HTLCs that are on track to be in their next commitment tx
	///
	/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
	/// `fee_spike_buffer_htlc` is `Some`.
	///
	/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
	///
	/// Dust HTLCs are excluded.
	fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		let context = &self;
		// Only the channel funder pays commitment tx fees; the remote's fee only matters to us
		// on channels they opened (i.e. inbound channels).
		assert!(!context.is_outbound());

		// Second-stage HTLC txs carry no fee on anchors-zero-fee-HTLC channels; otherwise the
		// (success/timeout) tx fee raises the effective dust threshold for each claim path.
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
				context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
		};
		// Thresholds are on the *counterparty's* commitment tx: HTLCs we offered are claimed by
		// them via success, HTLCs they offered via timeout.
		let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;

		// Extra HTLCs requested by the caller: the optional fee-spike buffer, plus the
		// candidate itself if it would be non-dust on their commitment tx.
		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			}
		}

		// When calculating the set of HTLCs which will be included in their next commitment_signed, all
		// non-dust inbound HTLCs are included (as all states imply it will be included) and only
		// committed outbound HTLCs, see below.
		let mut included_htlcs = 0;
		for ref htlc in context.pending_inbound_htlcs.iter() {
			// NOTE(review): `<=` here (vs the strict `<` used in next_local_commit_tx_fee_msat)
			// is preserved as-is; changing it would alter which boundary amounts count as dust.
			if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
				continue
			}
			included_htlcs += 1;
		}

		for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
				continue
			}
			// We only include outbound HTLCs if it will not be included in their next commitment_signed,
			// i.e. if they've responded to us with an RAA after announcement.
			match htlc.state {
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
				_ => {},
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
		// In test/fuzz builds, cache the inputs of this computation (with the buffer HTLC
		// excluded from the fee) so later commitment construction can cross-check it.
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
			}
			let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
				},
				feerate: context.feerate_per_kw,
			};
			*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
1986
1987         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
1988                 where F: Fn() -> Option<O> {
1989                 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
1990                    self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
1991                         f()
1992                 } else {
1993                         None
1994                 }
1995         }
1996
1997         /// Returns the transaction if there is a pending funding transaction that is yet to be
1998         /// broadcast.
1999         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2000                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2001         }
2002
2003         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2004         /// broadcast.
2005         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2006                 self.if_unbroadcasted_funding(||
2007                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2008                 )
2009         }
2010
2011         /// Returns whether the channel is funded in a batch.
2012         pub fn is_batch_funding(&self) -> bool {
2013                 self.is_batch_funding.is_some()
2014         }
2015
2016         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2017         /// broadcast.
2018         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2019                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2020         }
2021
	/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
	/// shutdown of this channel - no more calls into this Channel may be made afterwards except
	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
	/// immediately (others we will have to allow to time out).
	pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
		// Note that we MUST only generate a monitor update that indicates force-closure - we're
		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, its likely any monitor events we generate will
		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
		// return them to fail the payment.
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
		let counterparty_node_id = self.get_counterparty_node_id();
		for htlc_update in self.holding_cell_htlc_updates.drain(..) {
			match htlc_update {
				HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
					dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
				},
				// Non-add updates in the holding cell are simply discarded by the drain.
				_ => {}
			}
		}
		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
			// returning a channel monitor update here would imply a channel monitor update before
			// we even registered the channel monitor to begin with, which is invalid.
			// Thus, if we aren't actually at a point where we could conceivably broadcast the
			// funding transaction, don't return a funding txo (which prevents providing the
			// monitor update to the user, even if we return one).
			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
				// Mark the monitor as seeing its final update so any later update is rejected.
				self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
				Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
					update_id: self.latest_monitor_update_id,
					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
				}))
			} else { None }
		} else { None };
		// Capture the batch funding txid (if any) before flipping state, as callers may need to
		// unwind the rest of the batch.
		let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();

		self.channel_state = ChannelState::ShutdownComplete as u32;
		self.update_time_counter += 1;
		(monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid)
	}
2068 }
2069
2070 // Internal utility functions for channels
2071
2072 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2073 /// `channel_value_satoshis` in msat, set through
2074 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2075 ///
2076 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2077 ///
2078 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2079 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2080         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2081                 1
2082         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2083                 100
2084         } else {
2085                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2086         };
2087         channel_value_satoshis * 10 * configured_percent
2088 }
2089
2090 /// Returns a minimum channel reserve value the remote needs to maintain,
2091 /// required by us according to the configured or default
2092 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2093 ///
2094 /// Guaranteed to return a value no larger than channel_value_satoshis
2095 ///
2096 /// This is used both for outbound and inbound channels and has lower bound
2097 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2098 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2099         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2100         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2101 }
2102
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// Unsigned division by a non-zero constant can never overflow, so the previous
	// `overflowing_div(100)` (whose always-false overflow flag was discarded via
	// tuple destructuring) is replaced with a plain division: 1% of the channel value...
	let one_percent = channel_value_satoshis / 100;
	// ...floored at 1000 sats, but never exceeding the full channel value.
	cmp::min(channel_value_satoshis, cmp::max(one_percent, 1000))
}
2111
2112 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2113 // Note that num_htlcs should not include dust HTLCs.
2114 #[inline]
2115 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2116         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2117 }
2118
2119 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2120 // Note that num_htlcs should not include dust HTLCs.
2121 fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2122         // Note that we need to divide before multiplying to round properly,
2123         // since the lowest denomination of bitcoin on-chain is the satoshi.
2124         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2125 }
2126
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
/// Wrapper around the shared [`ChannelContext`] state; the methods implemented on this
/// type drive the channel's update/commitment/closing logic. `SP` provides the signer.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All channel state lives in the context; it is `pub` so sibling code in this
	// module can access it directly.
	pub context: ChannelContext<SP>,
}
2132
// NOTE(review): test/fuzzing-only cache of per-commitment-transaction data — presumably
// used to cross-check recomputed fee/HTLC state against a previously built commitment
// tx; confirm at the usage sites (outside this view).
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Commitment transaction fee, in satoshis.
	fee: u64,
	// Count of HTLCs pending on the cached transaction.
	total_pending_htlcs: usize,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	// Feerate (sat per 1000 weight units) the cached fee was computed at.
	feerate: u32,
}
2141
2142 impl<SP: Deref> Channel<SP> where
2143         SP::Target: SignerProvider,
2144         <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
2145 {
2146         fn check_remote_fee<F: Deref, L: Deref>(
2147                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2148                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2149         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2150         {
2151                 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2152                 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2153                 // We generally don't care too much if they set the feerate to something very high, but it
2154                 // could result in the channel being useless due to everything being dust. This doesn't
2155                 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2156                 // zero fee, so their fee is no longer considered to determine dust limits.
2157                 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
2158                         let upper_limit = cmp::max(250 * 25,
2159                                 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
2160                         if feerate_per_kw as u64 > upper_limit {
2161                                 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2162                         }
2163                 }
2164
2165                 // We can afford to use a lower bound with anchors than previously since we can now bump
2166                 // fees when broadcasting our commitment. However, we must still make sure we meet the
2167                 // minimum mempool feerate, until package relay is deployed, such that we can ensure the
2168                 // commitment transaction propagates throughout node mempools on its own.
2169                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2170                         ConfirmationTarget::MempoolMinimum
2171                 } else {
2172                         ConfirmationTarget::Background
2173                 };
2174                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2175                 // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
2176                 // occasional issues with feerate disagreements between an initiator that wants a feerate
2177                 // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
2178                 // sat/kw before the comparison here.
2179                 if feerate_per_kw + 250 < lower_limit {
2180                         if let Some(cur_feerate) = cur_feerate_per_kw {
2181                                 if feerate_per_kw > cur_feerate {
2182                                         log_warn!(logger,
2183                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2184                                                 cur_feerate, feerate_per_kw);
2185                                         return Ok(());
2186                                 }
2187                         }
2188                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
2189                 }
2190                 Ok(())
2191         }
2192
	/// Returns the script we will close the channel to, extracted from our shutdown script.
	///
	/// Panics (via the `unwrap` below) if `shutdown_scriptpubkey` has not been set yet.
	#[inline]
	fn get_closing_scriptpubkey(&self) -> Script {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will fail.
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	}
2200
2201         #[inline]
2202         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2203                 let mut ret =
2204                 (4 +                                                   // version
2205                  1 +                                                   // input count
2206                  36 +                                                  // prevout
2207                  1 +                                                   // script length (0)
2208                  4 +                                                   // sequence
2209                  1 +                                                   // output count
2210                  4                                                     // lock time
2211                  )*4 +                                                 // * 4 for non-witness parts
2212                 2 +                                                    // witness marker and flag
2213                 1 +                                                    // witness element count
2214                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2215                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2216                 2*(1 + 71);                                            // two signatures + sighash type flags
2217                 if let Some(spk) = a_scriptpubkey {
2218                         ret += ((8+1) +                                    // output values and script length
2219                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2220                 }
2221                 if let Some(spk) = b_scriptpubkey {
2222                         ret += ((8+1) +                                    // output values and script length
2223                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2224                 }
2225                 ret
2226         }
2227
	/// Builds the cooperative [`ClosingTransaction`] paying out both parties' balances given a
	/// proposed total fee. Returns the transaction and the actual total fee, which may exceed
	/// the proposal when the funder's balance cannot cover it.
	#[inline]
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		// A cooperative close requires a fully-resolved channel: no pending HTLCs or fee updates.
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		// The channel funder (the outbound side) pays the closing fee out of their balance.
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		// If the fee exceeds the funder's balance their output goes negative; cap the effective
		// fee at their whole balance instead (the output is then zeroed by the dust check below).
		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		// Drop outputs at or below our dust limit, and the remote output when the caller asks
		// us to skip it.
		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		// Both shutdown scripts must have been negotiated before a closing tx can be built.
		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
2262
	/// Returns the channel's funding transaction outpoint.
	///
	/// Panics (via the `unwrap`) if the funding outpoint has not been set in the channel's
	/// transaction parameters yet.
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
2266
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	/// disconnected).
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see equivalent if condition there).
		assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
		// Save and restore `latest_monitor_update_id` around the call so the monitor update
		// generated by `get_update_fulfill_htlc` is effectively discarded, per this method's
		// contract that the monitor was already updated out-of-band.
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
		}
	}
2288
	/// Attempts to claim the pending inbound HTLC `htlc_id_arg` with `payment_preimage_arg`,
	/// returning a [`ChannelMonitorUpdate`] carrying the preimage and, when the channel can
	/// currently send updates, the `update_fulfill_htlc` message for the peer.
	///
	/// Duplicate claims return [`UpdateFulfillFetch::DuplicateClaim`]. If the channel cannot
	/// send updates right now, the claim is queued in the holding cell and `msg` is `None`.
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted in an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// either.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		// Locate the inbound HTLC being claimed, verifying its state permits a fulfill.
		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				// The provided preimage must hash to this HTLC's payment hash.
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		// The channel can send updates immediately: mark the HTLC LocalRemoved and build the
		// update_fulfill_htlc message for the peer.
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
2410
	/// Claims an HTLC as in [`Self::get_update_fulfill_htlc`] and, when a fulfill message was
	/// produced, additionally builds the commitment update, folding everything into a single
	/// [`ChannelMonitorUpdate`] (sequenced ahead of any blocked updates when necessary).
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		// We may only release the commitment-update monitor immediately if nothing is queued.
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					// Slot the preimage update in ahead of the blocked queue: take over the
					// first blocked update's id and shift every queued id up by one.
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
2448
2449         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2450         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2451         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2452         /// before we fail backwards.
2453         ///
2454         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2455         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2456         /// [`ChannelError::Ignore`].
2457         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2458         -> Result<(), ChannelError> where L::Target: Logger {
2459                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2460                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2461         }
2462
2463         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2464         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2465         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2466         /// before we fail backwards.
2467         ///
2468         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2469         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2470         /// [`ChannelError::Ignore`].
2471         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2472         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2473                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2474                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2475                 }
2476                 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2477
2478                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2479                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2480                 // these, but for now we just have to treat them as normal.
2481
2482                 let mut pending_idx = core::usize::MAX;
2483                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2484                         if htlc.htlc_id == htlc_id_arg {
2485                                 match htlc.state {
2486                                         InboundHTLCState::Committed => {},
2487                                         InboundHTLCState::LocalRemoved(ref reason) => {
2488                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2489                                                 } else {
2490                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2491                                                 }
2492                                                 return Ok(None);
2493                                         },
2494                                         _ => {
2495                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2496                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2497                                         }
2498                                 }
2499                                 pending_idx = idx;
2500                         }
2501                 }
2502                 if pending_idx == core::usize::MAX {
2503                         #[cfg(any(test, fuzzing))]
2504                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2505                         // is simply a duplicate fail, not previously failed and we failed-back too early.
2506                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2507                         return Ok(None);
2508                 }
2509
2510                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2511                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2512                         force_holding_cell = true;
2513                 }
2514
2515                 // Now update local state:
2516                 if force_holding_cell {
2517                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2518                                 match pending_update {
2519                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2520                                                 if htlc_id_arg == htlc_id {
2521                                                         #[cfg(any(test, fuzzing))]
2522                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2523                                                         return Ok(None);
2524                                                 }
2525                                         },
2526                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2527                                                 if htlc_id_arg == htlc_id {
2528                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2529                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2530                                                 }
2531                                         },
2532                                         _ => {}
2533                                 }
2534                         }
2535                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2536                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2537                                 htlc_id: htlc_id_arg,
2538                                 err_packet,
2539                         });
2540                         return Ok(None);
2541                 }
2542
2543                 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2544                 {
2545                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2546                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2547                 }
2548
2549                 Ok(Some(msgs::UpdateFailHTLC {
2550                         channel_id: self.context.channel_id(),
2551                         htlc_id: htlc_id_arg,
2552                         reason: err_packet
2553                 }))
2554         }
2555
2556         // Message handlers:
2557
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	///
	/// On success, returns the [`ChannelMonitor`] for this channel, which the caller must persist
	/// before broadcasting the funding transaction. Returns a `ChannelError::Close` if the message
	/// arrives in an unexpected state or carries an invalid signature.
	pub fn funding_signed<L: Deref>(
		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
	where
		L::Target: Logger
	{
		// Only the outbound (funding) side sends funding_created and can thus receive
		// funding_signed in response.
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
		}
		// We must be exactly in FundingCreated, ignoring only the MonitorUpdateInProgress flag.
		if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
		}
		// Commitment numbers must still be at their initial values — advancing them before
		// funding_created completes would be an internal invariant violation, hence panic.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		// Rebuild the initial counterparty commitment transaction so we can seed the new
		// ChannelMonitor with it below.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		// Build our own initial commitment transaction and check the peer's signature over it.
		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
			}
		}

		// No HTLCs exist yet, so the holder commitment carries an empty HTLC-signature list.
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;


		// Assemble everything the new ChannelMonitor needs to watch the chain for this channel.
		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		                                          shutdown_script, self.context.get_holder_selected_contest_delay(),
		                                          &self.context.destination_script, (funding_txo, funding_txo_script),
		                                          &self.context.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);

		// Seed the monitor with the counterparty's initial commitment so it can react if that
		// transaction ever hits the chain.
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
		// In batch funding the funding transaction must not be broadcast until every channel in
		// the batch is ready, so flag that we are still waiting.
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
		} else {
			self.context.channel_state = ChannelState::FundingSent as u32;
		}
		// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER.
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let need_channel_ready = self.check_get_channel_ready(0).is_some();
		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok(channel_monitor)
	}
2650
2651         /// Updates the state of the channel to indicate that all channels in the batch have received
2652         /// funding_signed and persisted their monitors.
2653         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2654         /// treated as a non-batch channel going forward.
2655         pub fn set_batch_ready(&mut self) {
2656                 self.context.is_batch_funding = None;
2657                 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2658         }
2659
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	///
	/// Returns `Ok(None)` when the message is a (valid) duplicate/reconnect resend, otherwise
	/// advances our view of the counterparty's commitment point and may return
	/// announcement_signatures. Errors with `Ignore` on the lnd reconnect workaround and `Close`
	/// on protocol violations.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Some peers (notably lnd) send channel_ready before channel_reestablish on reconnect;
		// stash the message so it can be replayed once reestablish completes.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		debug_assert!(
			non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
			non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
		);
		// State transitions: if we haven't sent our channel_ready yet, just record theirs; if we
		// have, both sides are ready and the channel becomes usable.
		if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
			self.context.channel_state |= ChannelState::TheirChannelReady as u32;
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
		} else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
			// If we reconnected before sending our `channel_ready` they may still resend theirs:
			(self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
			                      (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
		{
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			// A resent channel_ready must carry the same "first" per-commitment point as the
			// original; which stored/derived point that is depends on how far the commitment
			// number has advanced since.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
							&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
						).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			// Valid duplicate — nothing further to do and no announcement_signatures to generate.
			return Ok(None);
		} else {
			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
		}

		// First (non-duplicate) channel_ready: rotate in their next per-commitment point.
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
	}
2737
	/// Handles an inbound update_add_htlc message, validating it against channel state and our
	/// advertised limits, and on success records it as a new pending inbound HTLC.
	///
	/// `create_pending_htlc_status` lets us downgrade `pending_forward_status` to a failure with
	/// the given BOLT 4 error code for conditions where we accept the HTLC but intend to fail it
	/// back (rather than closing the channel).
	///
	/// NOTE(review): the order of the checks below is load-bearing — several later unchecked
	/// subtractions rely on earlier bounds checks having returned an error first. Do not reorder.
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		// We can't accept HTLCs sent after we've sent a shutdown.
		let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if local_sent_shutdown {
			// 0x4000|8 = permanent_channel_failure: accept it but plan to fail it back.
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
		if remote_sent_shutdown {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		// Absolute amount sanity checks: non-zero, no more than the full channel value, and at
		// least our advertised htlc_minimum_msat.
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		// Enforce our advertised max_accepted_htlcs and max_htlc_value_in_flight_msat limits.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}
		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		// Dust-exposure checks: HTLCs below the dust limit (plus the HTLC-claim tx fee, unless
		// using zero-fee anchors) have no on-chain output and are pure fee if the channel closes,
		// so cap our total exposure to them. Violations fail the HTLC back (0x1000|7,
		// temporary_channel_failure) rather than closing the channel.
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		// Verify the sender can actually afford this HTLC: their spendable balance (discounting
		// outbound HTLCs being removed, per the note above) must cover the amount.
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
		};
		// `pending_remote_value_msat - msg.amount_msat` cannot underflow: checked just above.
		if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
			return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
		};

		if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
			return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
		}

		if !self.context.is_outbound() {
			// `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because in the spec, the fee spike buffer requirement doesn't exist on the
			// receiver's side, only on the sender's.
			// Note that when we eventually remove support for fee updates and switch to anchor output
			// fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
			// the extra htlc when calculating the next remote commitment transaction fee as we should
			// still be able to afford adding this HTLC plus one more future HTLC, regardless of being
			// sensitive to fee spikes.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}
		// HTLC IDs must be strictly sequential per BOLT 2.
		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		// cltv_expiry is a block height; values >= 500_000_000 are Unix timestamps in Bitcoin's
		// locktime encoding and thus invalid here.
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
2896
	/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
	///
	/// Looks up the pending outbound HTLC by `htlc_id` and transitions it to
	/// `RemoteRemoved` with the appropriate outcome, returning a reference to it.
	/// Exactly one of `check_preimage` (fulfill path) or `fail_reason` (fail path)
	/// may be `Some`. Returns `ChannelError::Close` if the HTLC is unknown, the
	/// preimage doesn't match, or the HTLC is not in a removable state.
	#[inline]
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		// An HTLC is either fulfilled or failed, never both — callers must not set both args.
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						// Verify SHA256(preimage) matches the hash the HTLC was offered under;
						// otherwise the peer is attempting to claim with a bogus preimage.
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					// The peer may only remove an HTLC after it has been committed on their side.
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					// Removing the same HTLC a second time is a protocol violation.
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
2927
2928         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2929                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2930                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2931                 }
2932                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2933                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
2934                 }
2935
2936                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
2937         }
2938
2939         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2940                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2941                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
2942                 }
2943                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2944                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
2945                 }
2946
2947                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2948                 Ok(())
2949         }
2950
2951         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2952                 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2953                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
2954                 }
2955                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2956                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
2957                 }
2958
2959                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
2960                 Ok(())
2961         }
2962
	/// Handles an inbound `commitment_signed` message: verifies the commitment and per-HTLC
	/// signatures against a locally-rebuilt holder commitment transaction, advances pending
	/// HTLC/fee state, and returns the (possibly blocked) [`ChannelMonitorUpdate`] carrying
	/// the new holder commitment info.
	///
	/// Returns `ChannelError::Close` on any protocol violation or invalid signature.
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		// --- Precondition checks: channel must be operational, reestablished, and not
		// already negotiating closing fees. ---
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		// Rebuild the holder commitment transaction locally so we can verify the peer's
		// signature covers exactly what we expect.
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			// Verify the peer's signature on the commitment transaction itself.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			// Only the funder (the outbound side) may change fees, so a remote-announced
			// fee update implies we are not the outbound side.
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		// Test/fuzz-only sanity check: if the cached projected fee info still matches the
		// current HTLC/fee state, the fee we just computed must agree with it.
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
						}
				}
			}
		}

		// The peer must provide exactly one signature per non-dust HTLC output.
		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			// HTLCs with a transaction_output_index are non-dust: rebuild the HTLC tx and
			// verify the peer's signature on it. Dust HTLCs have no output and no signature.
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				// Anchor-style channels sign HTLC txs with SINGLE|ANYONECANPAY so fees can be
				// attached later; legacy channels use SIGHASH_ALL.
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give the signer a chance to reject the new commitment before we commit to it.
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		// Inbound HTLCs the peer announced are now committed on our side; advance them so
		// the next revoke_and_ack can acknowledge them.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim anyway.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		// Persist the newly-signed holder commitment (and HTLC data) via the monitor.
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}]
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that get_last_commitment_update
				// includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
3183
3184         /// Public version of the below, checking relevant preconditions first.
3185         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3186         /// returns `(None, Vec::new())`.
3187         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3188                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3189         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3190         where F::Target: FeeEstimator, L::Target: Logger
3191         {
3192                 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3193                    (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3194                         self.free_holding_cell_htlcs(fee_estimator, logger)
3195                 } else { (None, Vec::new()) }
3196         }
3197
3198         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3199         /// for our counterparty.
3200         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3201                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3202         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3203         where F::Target: FeeEstimator, L::Target: Logger
3204         {
3205                 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3206                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3207                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3208                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3209
3210                         let mut monitor_update = ChannelMonitorUpdate {
3211                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3212                                 updates: Vec::new(),
3213                         };
3214
3215                         let mut htlc_updates = Vec::new();
3216                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3217                         let mut update_add_count = 0;
3218                         let mut update_fulfill_count = 0;
3219                         let mut update_fail_count = 0;
3220                         let mut htlcs_to_fail = Vec::new();
3221                         for htlc_update in htlc_updates.drain(..) {
3222                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
3223                                 // fee races with adding too many outputs which push our total payments just over
3224                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
3225                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3226                                 // to rebalance channels.
3227                                 match &htlc_update {
3228                                         &HTLCUpdateAwaitingACK::AddHTLC {
3229                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3230                                                 skimmed_fee_msat, ..
3231                                         } => {
3232                                                 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3233                                                         onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3234                                                 {
3235                                                         Ok(_) => update_add_count += 1,
3236                                                         Err(e) => {
3237                                                                 match e {
3238                                                                         ChannelError::Ignore(ref msg) => {
3239                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3240                                                                                 // If we fail to send here, then this HTLC should
3241                                                                                 // be failed backwards. Failing to send here
3242                                                                                 // indicates that this HTLC may keep being put back
3243                                                                                 // into the holding cell without ever being
3244                                                                                 // successfully forwarded/failed/fulfilled, causing
3245                                                                                 // our counterparty to eventually close on us.
3246                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3247                                                                         },
3248                                                                         _ => {
3249                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3250                                                                         },
3251                                                                 }
3252                                                         }
3253                                                 }
3254                                         },
3255                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3256                                                 // If an HTLC claim was previously added to the holding cell (via
3257                                                 // `get_update_fulfill_htlc`, then generating the claim message itself must
3258                                                 // not fail - any in between attempts to claim the HTLC will have resulted
3259                                                 // in it hitting the holding cell again and we cannot change the state of a
3260                                                 // holding cell HTLC from fulfill to anything else.
3261                                                 let mut additional_monitor_update =
3262                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3263                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3264                                                         { monitor_update } else { unreachable!() };
3265                                                 update_fulfill_count += 1;
3266                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3267                                         },
3268                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3269                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3270                                                         Ok(update_fail_msg_option) => {
3271                                                                 // If an HTLC failure was previously added to the holding cell (via
3272                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3273                                                                 // not fail - we should never end up in a state where we double-fail
3274                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3275                                                                 // for a full revocation before failing.
3276                                                                 debug_assert!(update_fail_msg_option.is_some());
3277                                                                 update_fail_count += 1;
3278                                                         },
3279                                                         Err(e) => {
3280                                                                 if let ChannelError::Ignore(_) = e {}
3281                                                                 else {
3282                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3283                                                                 }
3284                                                         }
3285                                                 }
3286                                         },
3287                                 }
3288                         }
3289                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3290                                 return (None, htlcs_to_fail);
3291                         }
3292                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3293                                 self.send_update_fee(feerate, false, fee_estimator, logger)
3294                         } else {
3295                                 None
3296                         };
3297
3298                         let mut additional_update = self.build_commitment_no_status_check(logger);
3299                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3300                         // but we want them to be strictly increasing by one, so reset it here.
3301                         self.context.latest_monitor_update_id = monitor_update.update_id;
3302                         monitor_update.updates.append(&mut additional_update.updates);
3303
3304                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3305                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3306                                 update_add_count, update_fulfill_count, update_fail_count);
3307
3308                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3309                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3310                 } else {
3311                         (None, Vec::new())
3312                 }
3313         }
3314
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	///
	/// On success, returns the HTLCs which must be failed backwards (from the holding cell) and,
	/// unless monitor updates are currently blocked or `hold_mon_update` is set, the
	/// `ChannelMonitorUpdate` which the caller must apply.
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		// Sanity-check our state first - all of these checks can fail without having mutated
		// anything, and all result in a force-close as the peer is misbehaving.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		// The revealed secret must derive the commitment point they previously gave us,
		// otherwise they are revoking the wrong (or a bogus) state.
		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked claim). But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		#[cfg(any(test, fuzzing))]
		{
			// The set of committed HTLCs is about to change, so drop the cached fee info used by
			// the test/fuzzing-only fee sanity checks.
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			}
		};

		// Store the revealed secret and queue a monitor update carrying it; this is the last
		// call which can fail before we start mutating channel state below.
		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.)
		self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		// Step every pending HTLC forward now that the remote has revoked their previous state,
		// collecting the actions (forwards, failures, finalized claims) to hand back.
		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		// Net change to our balance from HTLCs fully resolved here (inbound fulfills add,
		// outbound fulfills subtract).
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;

			// We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					// Temporarily move the state out so we can consume the owned forward_info.
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		// A pending fee update may also be committed by this RAA (or, for an inbound update,
		// require a new commitment to commit it).
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		// If monitor updates are blocked (or the caller asked us to hold), stash the update in
		// blocked_monitor_updates instead of returning it.
		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call get_last_commitment_update(),
				// which does not update state, but we're definitely now awaiting a remote revoke
				// before we can step forward any more, so set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so reset it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so reset it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so reset it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
3582
3583         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3584         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3585         /// commitment update.
3586         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3587                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3588         where F::Target: FeeEstimator, L::Target: Logger
3589         {
3590                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3591                 assert!(msg_opt.is_none(), "We forced holding cell?");
3592         }
3593
3594         /// Adds a pending update to this channel. See the doc for send_htlc for
3595         /// further details on the optionness of the return value.
3596         /// If our balance is too low to cover the cost of the next commitment transaction at the
3597         /// new feerate, the update is cancelled.
3598         ///
3599         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3600         /// [`Channel`] if `force_holding_cell` is false.
3601         fn send_update_fee<F: Deref, L: Deref>(
3602                 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3603                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3604         ) -> Option<msgs::UpdateFee>
3605         where F::Target: FeeEstimator, L::Target: Logger
3606         {
3607                 if !self.context.is_outbound() {
3608                         panic!("Cannot send fee from inbound channel");
3609                 }
3610                 if !self.context.is_usable() {
3611                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3612                 }
3613                 if !self.context.is_live() {
3614                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3615                 }
3616
3617                 // Before proposing a feerate update, check that we can actually afford the new fee.
3618                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3619                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3620                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3621                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3622                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3623                 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3624                 if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3625                         //TODO: auto-close after a number of failures?
3626                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3627                         return None;
3628                 }
3629
3630                 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3631                 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3632                 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3633                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3634                 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3635                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3636                         return None;
3637                 }
3638                 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3639                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3640                         return None;
3641                 }
3642
3643                 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3644                         force_holding_cell = true;
3645                 }
3646
3647                 if force_holding_cell {
3648                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
3649                         return None;
3650                 }
3651
3652                 debug_assert!(self.context.pending_update_fee.is_none());
3653                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3654
3655                 Some(msgs::UpdateFee {
3656                         channel_id: self.context.channel_id,
3657                         feerate_per_kw,
3658                 })
3659         }
3660
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		// Callers must never invoke this on a channel which has completed shutdown.
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		// Pre-funding channels cannot survive a disconnect - tell the caller to drop us.
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			return Err(());
		}

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		// Any announcement_signatures we sent may have been lost in flight - mark them for
		// regeneration on reconnect.
		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		// Drop any inbound HTLCs the counterparty announced but never committed to; everything
		// else stays as-is (see per-state comments below).
		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		// Roll the counter back so re-announced HTLCs reuse the ids we just dropped.
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		// An inbound update_fee without a corresponding commitment_signed is dropped exactly like
		// a RemoteAnnounced inbound HTLC - the counterparty will re-send it after reconnect.
		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
3739
3740         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3741         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3742         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3743         /// update completes (potentially immediately).
3744         /// The messages which were generated with the monitor update must *not* have been sent to the
3745         /// remote end, and must instead have been dropped. They will be regenerated when
3746         /// [`Self::monitor_updating_restored`] is called.
3747         ///
3748         /// [`ChannelManager`]: super::channelmanager::ChannelManager
3749         /// [`chain::Watch`]: crate::chain::Watch
3750         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3751         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3752                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3753                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3754                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3755         ) {
3756                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3757                 self.context.monitor_pending_commitment_signed |= resend_commitment;
3758                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3759                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3760                 self.context.monitor_pending_failures.append(&mut pending_fails);
3761                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3762                 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3763         }
3764
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		// This must only be called while a monitor update is actually pending; clear the flag now
		// that the update has been persisted.
		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);

		// Take the queued HTLC resolutions (leaving empty Vecs behind) to hand back to the caller.
		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		// If the peer is disconnected, channel_reestablish handling will regenerate any
		// revoke_and_ack/commitment_signed resends, so we can drop those flags here and only
		// return the HTLC resolutions plus any channel_ready/funding broadcast computed above.
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		// Peer is connected - regenerate whichever of the last revoke_and_ack/commitment_signed
		// we held back while the monitor update was pending.
		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.mark_awaiting_response();
			Some(self.get_last_commitment_update(logger))
		} else { None };

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
3847
3848         pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
3849                 where F::Target: FeeEstimator, L::Target: Logger
3850         {
3851                 if self.context.is_outbound() {
3852                         return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
3853                 }
3854                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3855                         return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
3856                 }
3857                 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
3858                 let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
3859
3860                 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
3861                 self.context.update_time_counter += 1;
3862                 // If the feerate has increased over the previous dust buffer (note that
3863                 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
3864                 // won't be pushed over our dust exposure limit by the feerate increase.
3865                 if feerate_over_dust_buffer {
3866                         let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3867                         let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3868                         let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3869                         let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3870                         let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3871                         if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3872                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
3873                                         msg.feerate_per_kw, holder_tx_dust_exposure)));
3874                         }
3875                         if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3876                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
3877                                         msg.feerate_per_kw, counterparty_tx_dust_exposure)));
3878                         }
3879                 }
3880                 Ok(())
3881         }
3882
3883         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
3884                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3885                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
3886                 msgs::RevokeAndACK {
3887                         channel_id: self.context.channel_id,
3888                         per_commitment_secret,
3889                         next_per_commitment_point,
3890                         #[cfg(taproot)]
3891                         next_local_nonce: None,
3892                 }
3893         }
3894
3895         fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
3896                 let mut update_add_htlcs = Vec::new();
3897                 let mut update_fulfill_htlcs = Vec::new();
3898                 let mut update_fail_htlcs = Vec::new();
3899                 let mut update_fail_malformed_htlcs = Vec::new();
3900
3901                 for htlc in self.context.pending_outbound_htlcs.iter() {
3902                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
3903                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
3904                                         channel_id: self.context.channel_id(),
3905                                         htlc_id: htlc.htlc_id,
3906                                         amount_msat: htlc.amount_msat,
3907                                         payment_hash: htlc.payment_hash,
3908                                         cltv_expiry: htlc.cltv_expiry,
3909                                         onion_routing_packet: (**onion_packet).clone(),
3910                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
3911                                 });
3912                         }
3913                 }
3914
3915                 for htlc in self.context.pending_inbound_htlcs.iter() {
3916                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3917                                 match reason {
3918                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
3919                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
3920                                                         channel_id: self.context.channel_id(),
3921                                                         htlc_id: htlc.htlc_id,
3922                                                         reason: err_packet.clone()
3923                                                 });
3924                                         },
3925                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
3926                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
3927                                                         channel_id: self.context.channel_id(),
3928                                                         htlc_id: htlc.htlc_id,
3929                                                         sha256_of_onion: sha256_of_onion.clone(),
3930                                                         failure_code: failure_code.clone(),
3931                                                 });
3932                                         },
3933                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
3934                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
3935                                                         channel_id: self.context.channel_id(),
3936                                                         htlc_id: htlc.htlc_id,
3937                                                         payment_preimage: payment_preimage.clone(),
3938                                                 });
3939                                         },
3940                                 }
3941                         }
3942                 }
3943
3944                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
3945                         Some(msgs::UpdateFee {
3946                                 channel_id: self.context.channel_id(),
3947                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
3948                         })
3949                 } else { None };
3950
3951                 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
3952                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
3953                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
3954                 msgs::CommitmentUpdate {
3955                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
3956                         commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
3957                 }
3958         }
3959
3960         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
3961         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
3962                 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
3963                         assert!(self.context.shutdown_scriptpubkey.is_some());
3964                         Some(msgs::Shutdown {
3965                                 channel_id: self.context.channel_id,
3966                                 scriptpubkey: self.get_closing_scriptpubkey(),
3967                         })
3968                 } else { None }
3969         }
3970
3971         /// May panic if some calls other than message-handling calls (which will all Err immediately)
3972         /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
3973         ///
3974         /// Some links printed in log lines are included here to check them during build (when run with
3975         /// `cargo doc --document-private-items`):
3976         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
3977         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
3978         pub fn channel_reestablish<L: Deref, NS: Deref>(
3979                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
3980                 genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
3981         ) -> Result<ReestablishResponses, ChannelError>
3982         where
3983                 L::Target: Logger,
3984                 NS::Target: NodeSigner
3985         {
3986                 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
3987                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
3988                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
3989                         // just close here instead of trying to recover.
3990                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
3991                 }
3992
3993                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
3994                         msg.next_local_commitment_number == 0 {
3995                         return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
3996                 }
3997
3998                 if msg.next_remote_commitment_number > 0 {
3999                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4000                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4001                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4002                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4003                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4004                         }
4005                         if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4006                                 macro_rules! log_and_panic {
4007                                         ($err_msg: expr) => {
4008                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4009                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4010                                         }
4011                                 }
4012                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4013                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4014                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4015                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4016                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4017                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4018                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4019                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4020                         }
4021                 }
4022
4023                 // Before we change the state of the channel, we check if the peer is sending a very old
4024                 // commitment transaction number, if yes we send a warning message.
4025                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4026                 if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4027                         return Err(
4028                                 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4029                         );
4030                 }
4031
4032                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4033                 // remaining cases either succeed or ErrorMessage-fail).
4034                 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4035                 self.context.sent_message_awaiting_response = None;
4036
4037                 let shutdown_msg = self.get_outbound_shutdown();
4038
4039                 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
4040
4041                 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4042                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4043                         if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4044                                         self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4045                                 if msg.next_remote_commitment_number != 0 {
4046                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4047                                 }
4048                                 // Short circuit the whole handler as there is nothing we can resend them
4049                                 return Ok(ReestablishResponses {
4050                                         channel_ready: None,
4051                                         raa: None, commitment_update: None,
4052                                         order: RAACommitmentOrder::CommitmentFirst,
4053                                         shutdown_msg, announcement_sigs,
4054                                 });
4055                         }
4056
4057                         // We have OurChannelReady set!
4058                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4059                         return Ok(ReestablishResponses {
4060                                 channel_ready: Some(msgs::ChannelReady {
4061                                         channel_id: self.context.channel_id(),
4062                                         next_per_commitment_point,
4063                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4064                                 }),
4065                                 raa: None, commitment_update: None,
4066                                 order: RAACommitmentOrder::CommitmentFirst,
4067                                 shutdown_msg, announcement_sigs,
4068                         });
4069                 }
4070
4071                 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4072                         // Remote isn't waiting on any RevokeAndACK from us!
4073                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4074                         None
4075                 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4076                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4077                                 self.context.monitor_pending_revoke_and_ack = true;
4078                                 None
4079                         } else {
4080                                 Some(self.get_last_revoke_and_ack())
4081                         }
4082                 } else {
4083                         return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4084                 };
4085
4086                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4087                 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4088                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4089                 // the corresponding revoke_and_ack back yet.
4090                 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4091                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4092                         self.mark_awaiting_response();
4093                 }
4094                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4095
4096                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4097                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4098                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4099                         Some(msgs::ChannelReady {
4100                                 channel_id: self.context.channel_id(),
4101                                 next_per_commitment_point,
4102                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4103                         })
4104                 } else { None };
4105
4106                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4107                         if required_revoke.is_some() {
4108                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4109                         } else {
4110                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4111                         }
4112
4113                         Ok(ReestablishResponses {
4114                                 channel_ready, shutdown_msg, announcement_sigs,
4115                                 raa: required_revoke,
4116                                 commitment_update: None,
4117                                 order: self.context.resend_order.clone(),
4118                         })
4119                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4120                         if required_revoke.is_some() {
4121                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4122                         } else {
4123                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4124                         }
4125
4126                         if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4127                                 self.context.monitor_pending_commitment_signed = true;
4128                                 Ok(ReestablishResponses {
4129                                         channel_ready, shutdown_msg, announcement_sigs,
4130                                         commitment_update: None, raa: None,
4131                                         order: self.context.resend_order.clone(),
4132                                 })
4133                         } else {
4134                                 Ok(ReestablishResponses {
4135                                         channel_ready, shutdown_msg, announcement_sigs,
4136                                         raa: required_revoke,
4137                                         commitment_update: Some(self.get_last_commitment_update(logger)),
4138                                         order: self.context.resend_order.clone(),
4139                                 })
4140                         }
4141                 } else {
4142                         Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4143                 }
4144         }
4145
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		// Return the cached limits, if any, so our negotiation range stays consistent across
		// repeated calls within one connection.
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
		// As the non-funder we don't pay the closing fee ourselves, so place no feerate cap of
		// our own (the absolute max fee is bounded by the funder's balance below).
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		// If the user configured a target closing feerate, raise both ends of our range to at
		// least that rate (as fundee, never above the current channel feerate).
		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one sides' balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funders' output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				// As the fundee, the fee comes out of the funder's balance, so cap the max at
				// their entire balance (channel value less our balance, rounded up to whole sats).
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		// Cache the computed range; subsequent calls return it unchanged until it is cleared
		// (e.g. on reconnect).
		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
4196
4197         /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4198         /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4199         /// this point if we're the funder we should send the initial closing_signed, and in any case
4200         /// shutdown should complete within a reasonable timeframe.
4201         fn closing_negotiation_ready(&self) -> bool {
4202                 self.context.closing_negotiation_ready()
4203         }
4204
4205         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4206         /// an Err if no progress is being made and the channel should be force-closed instead.
4207         /// Should be called on a one-minute timer.
4208         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4209                 if self.closing_negotiation_ready() {
4210                         if self.context.closing_signed_in_flight {
4211                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4212                         } else {
4213                                 self.context.closing_signed_in_flight = true;
4214                         }
4215                 }
4216                 Ok(())
4217         }
4218
	/// If appropriate, kicks off `closing_signed` fee negotiation: as the funder, builds and
	/// signs our initial proposal; as the fundee, replays any counterparty `closing_signed`
	/// that was queued while a monitor update was in flight.
	///
	/// Returns the `closing_signed` to send (if any) and, if negotiation concluded inside
	/// `closing_signed`, the final transaction to broadcast.
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// Nothing to do if we already sent a proposal or negotiation can't start yet.
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None));
		}

		// As the fundee we never open negotiation ourselves; instead process a queued
		// counterparty closing_signed, if one arrived during a monitor update.
		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		// Open at our minimum acceptable fee; the fee_range below tells the peer how high
		// we're willing to go.
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				// Record what we proposed so we can recognize acceptance and enforce progress
				// when their closing_signed comes back.
				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None))
			}
		}
	}
4261
4262         // Marks a channel as waiting for a response from the counterparty. If it's not received
4263         // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4264         // a reconnection.
4265         fn mark_awaiting_response(&mut self) {
4266                 self.context.sent_message_awaiting_response = Some(0);
4267         }
4268
4269         /// Determines whether we should disconnect the counterparty due to not receiving a response
4270         /// within our expected timeframe.
4271         ///
4272         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4273         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4274                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4275                         ticks_elapsed
4276                 } else {
4277                         // Don't disconnect when we're not waiting on a response.
4278                         return false;
4279                 };
4280                 *ticks_elapsed += 1;
4281                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4282         }
4283
	/// Handles a counterparty `shutdown` message: validates channel state and their closing
	/// script, records that script, sets the `RemoteShutdownSent` and `LocalShutdownSent`
	/// state flags, and drops any holding-cell HTLC adds so the corresponding payments can be
	/// failed back.
	///
	/// Returns our own `shutdown` to send (if we haven't sent one already), an optional monitor
	/// update persisting our freshly-chosen closing script, and the dropped outbound HTLCs.
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		// Shutdown can't be accepted while the counterparty has un-committed HTLC adds in
		// flight towards us.
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
		}

		// The counterparty may not change their closing script once we've seen one (e.g. on a
		// re-sent shutdown after reconnect).
		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;

		// If we haven't yet chosen our own closing script, fetch one from the signer now; a
		// `true` result means we must persist it via a monitor update below.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
		self.context.update_time_counter += 1;

		// Persist the just-chosen closing script in the monitor so it survives restarts.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
4380
4381         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4382                 let mut tx = closing_tx.trust().built_transaction().clone();
4383
4384                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4385
4386                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4387                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4388                 let mut holder_sig = sig.serialize_der().to_vec();
4389                 holder_sig.push(EcdsaSighashType::All as u8);
4390                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4391                 cp_sig.push(EcdsaSighashType::All as u8);
4392                 if funding_key[..] < counterparty_funding_key[..] {
4393                         tx.input[0].witness.push(holder_sig);
4394                         tx.input[0].witness.push(cp_sig);
4395                 } else {
4396                         tx.input[0].witness.push(cp_sig);
4397                         tx.input[0].witness.push(holder_sig);
4398                 }
4399
4400                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4401                 tx
4402         }
4403
	/// Handles a counterparty `closing_signed`: verifies their signature over the proposed
	/// closing transaction and either accepts their fee (returning the fully-signed, ready-to-
	/// broadcast transaction) or counter-proposes a fee within both sides' acceptable ranges.
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
		where F::Target: FeeEstimator
	{
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		// As the funder we make the initial proposal, so the counterparty shouldn't initiate.
		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		// While a monitor update is in flight, queue the message; maybe_propose_closing_signed
		// replays it once the update completes.
		if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None));
		}

		// Rebuild the closing tx at their proposed fee and check their signature over it.
		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_e) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};

		// Reject non-segwit dust outputs, which would make the closing transaction non-standard.
		for outp in closing_tx.trust().built_transaction().output.iter() {
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		// If they echoed back exactly the fee we last proposed, negotiation is complete: build
		// the final signed transaction and mark the channel shut down.
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete as u32;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx)));
			}
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		// Signs and returns a closing_signed at $new_fee; when $new_fee equals the peer's
		// proposal this also finalizes the channel and returns the broadcastable transaction.
		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;

						let signed_tx = if $new_fee == msg.fee_satoshis {
							self.context.channel_state = ChannelState::ShutdownComplete as u32;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							Some(tx)
						} else { None };

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx))
					}
				}
			}
		}

		// Modern fee_range-based negotiation: check their proposal is internally consistent,
		// that our ranges overlap, and converge within the overlap.
		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					// They want a higher fee than we last offered; move up towards them but
					// never past our max.
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					// They want a lower fee; move down towards them but never below our min.
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				// No prior proposal from us: clamp their fee into our range and respond.
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
4559
4560         fn internal_htlc_satisfies_config(
4561                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4562         ) -> Result<(), (&'static str, u16)> {
4563                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4564                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4565                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4566                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4567                         return Err((
4568                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4569                                 0x1000 | 12, // fee_insufficient
4570                         ));
4571                 }
4572                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4573                         return Err((
4574                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4575                                 0x1000 | 13, // incorrect_cltv_expiry
4576                         ));
4577                 }
4578                 Ok(())
4579         }
4580
4581         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4582         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4583         /// unsuccessful, falls back to the previous one if one exists.
4584         pub fn htlc_satisfies_config(
4585                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4586         ) -> Result<(), (&'static str, u16)> {
4587                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4588                         .or_else(|err| {
4589                                 if let Some(prev_config) = self.context.prev_config() {
4590                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4591                                 } else {
4592                                         Err(err)
4593                                 }
4594                         })
4595         }
4596
4597         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4598                 self.context.cur_holder_commitment_transaction_number + 1
4599         }
4600
4601         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4602                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4603         }
4604
4605         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4606                 self.context.cur_counterparty_commitment_transaction_number + 2
4607         }
4608
	/// Test-only accessor for the channel's holder signer.
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
		&self.context.holder_signer
	}
4613
4614         #[cfg(test)]
4615         pub fn get_value_stat(&self) -> ChannelValueStat {
4616                 ChannelValueStat {
4617                         value_to_self_msat: self.context.value_to_self_msat,
4618                         channel_value_msat: self.context.channel_value_satoshis * 1000,
4619                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4620                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4621                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4622                         holding_cell_outbound_amount_msat: {
4623                                 let mut res = 0;
4624                                 for h in self.context.holding_cell_htlc_updates.iter() {
4625                                         match h {
4626                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4627                                                         res += amount_msat;
4628                                                 }
4629                                                 _ => {}
4630                                         }
4631                                 }
4632                                 res
4633                         },
4634                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4635                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4636                 }
4637         }
4638
4639         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4640         /// Allowed in any state (including after shutdown)
4641         pub fn is_awaiting_monitor_update(&self) -> bool {
4642                 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4643         }
4644
4645         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4646         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4647                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4648                 self.context.blocked_monitor_updates[0].update.update_id - 1
4649         }
4650
4651         /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4652         /// further blocked monitor update exists after the next.
4653         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4654                 if self.context.blocked_monitor_updates.is_empty() { return None; }
4655                 Some((self.context.blocked_monitor_updates.remove(0).update,
4656                         !self.context.blocked_monitor_updates.is_empty()))
4657         }
4658
4659         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4660         /// immediately given to the user for persisting or `None` if it should be held as blocked.
4661         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4662         -> Option<ChannelMonitorUpdate> {
4663                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4664                 if !release_monitor {
4665                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4666                                 update,
4667                         });
4668                         None
4669                 } else {
4670                         Some(update)
4671                 }
4672         }
4673
	/// Returns the number of [`ChannelMonitorUpdate`]s currently held back as blocked.
	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
4677
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_monitor_update_placeholder_do_not_use() {}
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		// Mask out the flags which may legitimately be set while waiting on the initial monitor
		// persistence; if only FundingSent remains we're a non-0conf channel in that state.
		if self.context.channel_state &
			!(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
				== ChannelState::FundingSent as u32 {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// FundingSent set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
4713
4714         /// Returns true if our channel_ready has been sent
4715         pub fn is_our_channel_ready(&self) -> bool {
4716                 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4717         }
4718
4719         /// Returns true if our peer has either initiated or agreed to shut down the channel.
4720         pub fn received_shutdown(&self) -> bool {
4721                 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4722         }
4723
4724         /// Returns true if we either initiated or agreed to shut down the channel.
4725         pub fn sent_shutdown(&self) -> bool {
4726                 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4727         }
4728
4729         /// Returns true if this channel is fully shut down. True here implies that no further actions
4730         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4731         /// will be handled appropriately by the chain monitor.
4732         pub fn is_shutdown(&self) -> bool {
4733                 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
4734                         assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4735                         true
4736                 } else { false }
4737         }
4738
	/// Returns the channel's current [`ChannelUpdateStatus`].
	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}
4742
	/// Sets the channel's [`ChannelUpdateStatus`], bumping the update time counter before
	/// recording the new status.
	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
4747
	/// Checks whether the funding transaction has reached sufficient depth and, if so, advances
	/// the channel state and returns the [`msgs::ChannelReady`] to send to our peer. Returns
	/// `None` if we're not ready yet, have already sent it, or must defer it (monitor update in
	/// progress or peer disconnected).
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		//
		// A confirmation height of zero means the funding tx is unconfirmed; only a 0-conf
		// channel (minimum_depth of Some(0)) may proceed in that case.
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			// A reorg took the funding transaction back out of the chain - reset our
			// confirmation-height tracking.
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
			// First time reaching sufficient depth: mark our channel_ready as sent.
			self.context.channel_state |= ChannelState::OurChannelReady as u32;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
			// The peer already sent their channel_ready - ours completes the handshake, so move
			// straight to ChannelReady (preserving any multi-state flag bits).
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state);
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				// A monitor update is in flight; record that we owe the peer a channel_ready so
				// it gets sent once the update completes.
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
4810
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
	/// In the first case, we store the confirmation height and calculating the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// (channel_ready, announcement_signatures) to hand back to the caller, if any.
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						// The funding output must exist, pay to our 2-of-2 witness script, and
						// carry exactly the channel value.
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								// Coinbase inputs never carry witnesses, so only check
								// non-coinbase funding transactions for malleability.
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							}
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				// Any confirmed transaction spending the funding output means the channel was
				// closed on-chain (commitment or cooperative close) - force-close our view of it.
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
4891
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Delegate to the shared implementation, providing the signer/config so it can also
		// build announcement_signatures when appropriate.
		self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
	}
4913
	/// Shared implementation of [`Self::best_block_updated`]: times out stale holding-cell HTLCs,
	/// checks whether we can now send channel_ready, and force-closes if the funding transaction
	/// was reorged out (or, for inbound channels, never confirmed in time). When
	/// `genesis_node_signer` is `None`, no announcement_signatures are generated.
	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
		// Drop (and collect for failing-back) any not-yet-forwarded outbound HTLCs whose CLTV
		// expiry is at or below the limit; all other holding-cell updates are kept.
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
				self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}

		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
		   (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map  being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			// Inbound channel whose funding never confirmed within the deadline - give up on it.
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
			assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
			self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
4988
4989         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
4990         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
4991         /// before the channel has reached channel_ready and we can just wait for more blocks.
4992         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
4993                 if self.context.funding_tx_confirmation_height != 0 {
4994                         // We handle the funding disconnection by calling best_block_updated with a height one
4995                         // below where our funding was connected, implying a reorg back to conf_height - 1.
4996                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
4997                         // We use the time field to bump the current time we set on channel updates if its
4998                         // larger. If we don't know that time has moved forward, we can just set it to the last
4999                         // time we saw and it will be ignored.
5000                         let best_time = self.context.update_time_counter;
5001                         match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
5002                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5003                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5004                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5005                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5006                                         Ok(())
5007                                 },
5008                                 Err(e) => Err(e)
5009                         }
5010                 } else {
5011                         // We never learned about the funding confirmation anyway, just ignore
5012                         Ok(())
5013                 }
5014         }
5015
5016         // Methods to get unprompted messages to send to the remote end (or where we already returned
5017         // something in the handler for the message that prompted this message):
5018
5019         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5020         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5021         /// directions). Should be used for both broadcasted announcements and in response to an
5022         /// AnnouncementSignatures message from the remote peer.
5023         ///
5024         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5025         /// closing).
5026         ///
5027         /// This will only return ChannelError::Ignore upon failure.
5028         ///
5029         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5030         fn get_channel_announcement<NS: Deref>(
5031                 &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
5032         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5033                 if !self.context.config.announced_channel {
5034                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5035                 }
5036                 if !self.context.is_usable() {
5037                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5038                 }
5039
5040                 let short_channel_id = self.context.get_short_channel_id()
5041                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5042                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5043                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5044                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5045                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5046
5047                 let msg = msgs::UnsignedChannelAnnouncement {
5048                         features: channelmanager::provided_channel_features(&user_config),
5049                         chain_hash,
5050                         short_channel_id,
5051                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5052                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5053                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5054                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5055                         excess_data: Vec::new(),
5056                 };
5057
5058                 Ok(msg)
5059         }
5060
	/// Builds an `announcement_signatures` message to send to our peer, if the channel is
	/// currently announceable: sufficiently confirmed, usable, peer connected, and signatures
	/// not already sent. Returns `None` (after logging, where useful) otherwise.
	///
	/// On success, transitions `announcement_sigs_state` to `MessageSent` so we don't re-send.
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		// Require six confirmations of the funding tx before announcing (the confirmation
		// height itself counts as one, so conf_height + 5 <= best_block_height means six).
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		// Channel isn't fully open (or is shutting down) - nothing to announce yet.
		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		// We've already sent (or committed) our signatures for this channel; don't re-send.
		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		// Sign with our node key first; a signer failure here means the channel simply won't
		// be announced rather than being an error.
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				// Second signature commits with the channel's funding key.
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				// Only mark sent once both signatures were successfully produced.
				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			}
		}
	}
5126
5127         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5128         /// available.
5129         fn sign_channel_announcement<NS: Deref>(
5130                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5131         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5132                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5133                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5134                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5135                         let were_node_one = announcement.node_id_1 == our_node_key;
5136
5137                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5138                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5139                         match &self.context.holder_signer {
5140                                 ChannelSignerType::Ecdsa(ecdsa) => {
5141                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5142                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5143                                         Ok(msgs::ChannelAnnouncement {
5144                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5145                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5146                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5147                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5148                                                 contents: announcement,
5149                                         })
5150                                 }
5151                         }
5152                 } else {
5153                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5154                 }
5155         }
5156
5157         /// Processes an incoming announcement_signatures message, providing a fully-signed
5158         /// channel_announcement message which we can broadcast and storing our counterparty's
5159         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5160         pub fn announcement_signatures<NS: Deref>(
5161                 &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
5162                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5163         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5164                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5165
5166                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5167
5168                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5169                         return Err(ChannelError::Close(format!(
5170                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5171                                  &announcement, self.context.get_counterparty_node_id())));
5172                 }
5173                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5174                         return Err(ChannelError::Close(format!(
5175                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5176                                 &announcement, self.context.counterparty_funding_pubkey())));
5177                 }
5178
5179                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5180                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5181                         return Err(ChannelError::Ignore(
5182                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5183                 }
5184
5185                 self.sign_channel_announcement(node_signer, announcement)
5186         }
5187
5188         /// Gets a signed channel_announcement for this channel, if we previously received an
5189         /// announcement_signatures from our counterparty.
5190         pub fn get_signed_channel_announcement<NS: Deref>(
5191                 &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
5192         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5193                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5194                         return None;
5195                 }
5196                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5197                         Ok(res) => res,
5198                         Err(_) => return None,
5199                 };
5200                 match self.sign_channel_announcement(node_signer, announcement) {
5201                         Ok(res) => Some(res),
5202                         Err(_) => None,
5203                 }
5204         }
5205
	/// Builds the channel_reestablish message to send on reconnection.
	///
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		// We must only build this while marked disconnected, and only for channels which have
		// advanced past the initial counterparty commitment number.
		assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		// NOTE(review): `cur + 2` is taken to be the counterparty's most recently revoked
		// commitment's secret, which the unwrap below relies on - confirm against the
		// commitment_secrets bookkeeping.
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0;32]
		};
		// Start the disconnect-timeout countdown: we now expect a timely response to this message.
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
5256
5257
5258         // Send stuff to our remote peers:
5259
5260         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5261         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5262         /// commitment update.
5263         ///
5264         /// `Err`s will only be [`ChannelError::Ignore`].
5265         pub fn queue_add_htlc<F: Deref, L: Deref>(
5266                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5267                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5268                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5269         ) -> Result<(), ChannelError>
5270         where F::Target: FeeEstimator, L::Target: Logger
5271         {
5272                 self
5273                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5274                                 skimmed_fee_msat, fee_estimator, logger)
5275                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5276                         .map_err(|err| {
5277                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5278                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5279                                 err
5280                         })
5281         }
5282
	/// Adds a pending outbound HTLC to this channel. Note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   awaiting ACK.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		// The channel must be fully established (ChannelReady) and neither side may have begun
		// shutdown.
		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		// Sanity-cap the amount at the full channel value (in msat).
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		// Enforce the current fee-adjusted min/max bounds for a new outbound HTLC.
		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen, if we're !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		// If we're awaiting their revoke_and_ack or a monitor update is in flight, we can't put
		// a new update on the wire - it has to wait in the holding cell.
		let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			// Defer the HTLC; it will be sent when the holding cell is freed.
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
		};
		// Only consume the id once both the pending entry and the message use it.
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
5386
5387         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5388                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5389                 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5390                 // fail to generate this, we still are at least at a position where upgrading their status
5391                 // is acceptable.
5392                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5393                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5394                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5395                         } else { None };
5396                         if let Some(state) = new_state {
5397                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5398                                 htlc.state = state;
5399                         }
5400                 }
5401                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5402                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5403                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5404                                 // Grab the preimage, if it exists, instead of cloning
5405                                 let mut reason = OutboundHTLCOutcome::Success(None);
5406                                 mem::swap(outcome, &mut reason);
5407                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5408                         }
5409                 }
5410                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5411                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5412                                 debug_assert!(!self.context.is_outbound());
5413                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5414                                 self.context.feerate_per_kw = feerate;
5415                                 self.context.pending_update_fee = None;
5416                         }
5417                 }
5418                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5419
5420                 let (mut htlcs_ref, counterparty_commitment_tx) =
5421                         self.build_commitment_no_state_update(logger);
5422                 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5423                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5424                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5425
5426                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5427                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5428                 }
5429
5430                 self.context.latest_monitor_update_id += 1;
5431                 let monitor_update = ChannelMonitorUpdate {
5432                         update_id: self.context.latest_monitor_update_id,
5433                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5434                                 commitment_txid: counterparty_commitment_txid,
5435                                 htlc_outputs: htlcs.clone(),
5436                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5437                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5438                                 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5439                                 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5440                                 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5441                         }]
5442                 };
5443                 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5444                 monitor_update
5445         }
5446
	/// Builds the current counterparty commitment transaction and returns it together with the
	/// set of HTLCs included in it, without advancing any channel state (note the `&self`
	/// receiver; only test/fuzz-only fee-info caches are touched via interior mutability).
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				// In test/fuzz builds, cross-check the fee we previously cached as a projection
				// for the next remote commitment against the fee of the transaction we actually
				// built. The local-side cache is cleared as it is now stale.
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					// Only assert if the channel's HTLC counters and feerate still match the
					// cached projection - otherwise the cached fee no longer applies.
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
						}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
5475
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	///
	/// Returns the `commitment_signed` message along with the counterparty commitment txid and
	/// the HTLCs included in that commitment transaction.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		// Build the counterparty's current commitment transaction so we can sign it.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					// Collect references to the included HTLCs so we can log each one next to
					// its signature below.
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					// Ask the signer for the commitment signature and the per-HTLC signatures.
					// A signer failure maps to a channel-closing error per this method's docs.
					let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			}
		}
	}
5526
5527         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5528         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5529         ///
5530         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5531         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5532         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5533                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5534                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5535                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5536         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5537         where F::Target: FeeEstimator, L::Target: Logger
5538         {
5539                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5540                         onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
5541                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5542                 match send_res? {
5543                         Some(_) => {
5544                                 let monitor_update = self.build_commitment_no_status_check(logger);
5545                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5546                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
5547                         },
5548                         None => Ok(None)
5549                 }
5550         }
5551
5552         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5553         /// happened.
5554         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5555                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5556                         fee_base_msat: msg.contents.fee_base_msat,
5557                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5558                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
5559                 });
5560                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5561                 if did_change {
5562                         self.context.counterparty_forwarding_info = new_forwarding_info;
5563                 }
5564
5565                 Ok(did_change)
5566         }
5567
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
	///
	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
	/// [`ChannelMonitorUpdate`] will be returned).
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		// We refuse to start a shutdown while any of our outbound HTLCs are still in the
		// `LocalAnnounced` state - the user should process pending events first.
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		// Reject a second shutdown attempt, distinguishing whether we or the remote peer
		// already started one.
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
			if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
				return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
			}
			else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
			}
		}
		// A shutdown script set earlier (e.g. an upfront one) cannot be overridden now.
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
		// script is set, we just force-close and call it a day.
		let mut chan_closed = false;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			chan_closed = true;
		}

		// Decide whether we need to newly set (and persist via monitor update) our shutdown
		// script: only when none is set yet and the channel is actually funded.
		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None if !chan_closed => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
			None => false,
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Unfunded channel: jump straight to fully-shutdown.
			self.context.channel_state = ChannelState::ShutdownComplete as u32;
		} else {
			self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		}
		self.context.update_time_counter += 1;

		// If we picked a new shutdown script above, persist it to the monitor before we can
		// consider the shutdown message sent.
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
5672
5673         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5674                 self.context.holding_cell_htlc_updates.iter()
5675                         .flat_map(|htlc_update| {
5676                                 match htlc_update {
5677                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5678                                                 => Some((source, payment_hash)),
5679                                         _ => None,
5680                                 }
5681                         })
5682                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5683         }
5684 }
5685
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	/// The channel's state, configuration, and keys.
	pub context: ChannelContext<SP>,
	/// State which only applies while the channel is not yet funded.
	pub unfunded_context: UnfundedChannelContext,
}
5691
5692 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5693         pub fn new<ES: Deref, F: Deref>(
5694                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5695                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5696                 outbound_scid_alias: u64
5697         ) -> Result<OutboundV1Channel<SP>, APIError>
5698         where ES::Target: EntropySource,
5699               F::Target: FeeEstimator
5700         {
5701                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5702                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5703                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5704                 let pubkeys = holder_signer.pubkeys().clone();
5705
5706                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5707                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5708                 }
5709                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5710                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5711                 }
5712                 let channel_value_msat = channel_value_satoshis * 1000;
5713                 if push_msat > channel_value_msat {
5714                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5715                 }
5716                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5717                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5718                 }
5719                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5720                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5721                         // Protocol level safety check in place, although it should never happen because
5722                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5723                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5724                 }
5725
5726                 let channel_type = Self::get_initial_channel_type(&config, their_features);
5727                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5728
5729                 let commitment_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5730                         ConfirmationTarget::MempoolMinimum
5731                 } else {
5732                         ConfirmationTarget::Normal
5733                 };
5734                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5735
5736                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5737                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5738                 if value_to_self_msat < commitment_tx_fee {
5739                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5740                 }
5741
5742                 let mut secp_ctx = Secp256k1::new();
5743                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5744
5745                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5746                         match signer_provider.get_shutdown_scriptpubkey() {
5747                                 Ok(scriptpubkey) => Some(scriptpubkey),
5748                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5749                         }
5750                 } else { None };
5751
5752                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5753                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
5754                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5755                         }
5756                 }
5757
5758                 let destination_script = match signer_provider.get_destination_script() {
5759                         Ok(script) => script,
5760                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5761                 };
5762
5763                 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
5764
5765                 Ok(Self {
5766                         context: ChannelContext {
5767                                 user_id,
5768
5769                                 config: LegacyChannelConfig {
5770                                         options: config.channel_config.clone(),
5771                                         announced_channel: config.channel_handshake_config.announced_channel,
5772                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5773                                 },
5774
5775                                 prev_config: None,
5776
5777                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5778
5779                                 channel_id: temporary_channel_id,
5780                                 temporary_channel_id: Some(temporary_channel_id),
5781                                 channel_state: ChannelState::OurInitSent as u32,
5782                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
5783                                 secp_ctx,
5784                                 channel_value_satoshis,
5785
5786                                 latest_monitor_update_id: 0,
5787
5788                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5789                                 shutdown_scriptpubkey,
5790                                 destination_script,
5791
5792                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5793                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5794                                 value_to_self_msat,
5795
5796                                 pending_inbound_htlcs: Vec::new(),
5797                                 pending_outbound_htlcs: Vec::new(),
5798                                 holding_cell_htlc_updates: Vec::new(),
5799                                 pending_update_fee: None,
5800                                 holding_cell_update_fee: None,
5801                                 next_holder_htlc_id: 0,
5802                                 next_counterparty_htlc_id: 0,
5803                                 update_time_counter: 1,
5804
5805                                 resend_order: RAACommitmentOrder::CommitmentFirst,
5806
5807                                 monitor_pending_channel_ready: false,
5808                                 monitor_pending_revoke_and_ack: false,
5809                                 monitor_pending_commitment_signed: false,
5810                                 monitor_pending_forwards: Vec::new(),
5811                                 monitor_pending_failures: Vec::new(),
5812                                 monitor_pending_finalized_fulfills: Vec::new(),
5813
5814                                 #[cfg(debug_assertions)]
5815                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5816                                 #[cfg(debug_assertions)]
5817                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5818
5819                                 last_sent_closing_fee: None,
5820                                 pending_counterparty_closing_signed: None,
5821                                 closing_fee_limits: None,
5822                                 target_closing_feerate_sats_per_kw: None,
5823
5824                                 funding_tx_confirmed_in: None,
5825                                 funding_tx_confirmation_height: 0,
5826                                 short_channel_id: None,
5827                                 channel_creation_height: current_chain_height,
5828
5829                                 feerate_per_kw: commitment_feerate,
5830                                 counterparty_dust_limit_satoshis: 0,
5831                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
5832                                 counterparty_max_htlc_value_in_flight_msat: 0,
5833                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
5834                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
5835                                 holder_selected_channel_reserve_satoshis,
5836                                 counterparty_htlc_minimum_msat: 0,
5837                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
5838                                 counterparty_max_accepted_htlcs: 0,
5839                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
5840                                 minimum_depth: None, // Filled in in accept_channel
5841
5842                                 counterparty_forwarding_info: None,
5843
5844                                 channel_transaction_parameters: ChannelTransactionParameters {
5845                                         holder_pubkeys: pubkeys,
5846                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
5847                                         is_outbound_from_holder: true,
5848                                         counterparty_parameters: None,
5849                                         funding_outpoint: None,
5850                                         channel_type_features: channel_type.clone()
5851                                 },
5852                                 funding_transaction: None,
5853                                 is_batch_funding: None,
5854
5855                                 counterparty_cur_commitment_point: None,
5856                                 counterparty_prev_commitment_point: None,
5857                                 counterparty_node_id,
5858
5859                                 counterparty_shutdown_scriptpubkey: None,
5860
5861                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
5862
5863                                 channel_update_status: ChannelUpdateStatus::Enabled,
5864                                 closing_signed_in_flight: false,
5865
5866                                 announcement_sigs: None,
5867
5868                                 #[cfg(any(test, fuzzing))]
5869                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
5870                                 #[cfg(any(test, fuzzing))]
5871                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
5872
5873                                 workaround_lnd_bug_4006: None,
5874                                 sent_message_awaiting_response: None,
5875
5876                                 latest_inbound_scid_alias: None,
5877                                 outbound_scid_alias,
5878
5879                                 channel_pending_event_emitted: false,
5880                                 channel_ready_event_emitted: false,
5881
5882                                 #[cfg(any(test, fuzzing))]
5883                                 historical_inbound_htlc_fulfills: HashSet::new(),
5884
5885                                 channel_type,
5886                                 channel_keys_id,
5887
5888                                 blocked_monitor_updates: Vec::new(),
5889                         },
5890                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
5891                 })
5892         }
5893
5894         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
5895         fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
5896                 let counterparty_keys = self.context.build_remote_transaction_keys();
5897                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5898                 match &self.context.holder_signer {
5899                         // TODO (taproot|arik): move match into calling method for Taproot
5900                         ChannelSignerType::Ecdsa(ecdsa) => {
5901                                 Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
5902                                         .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5903                         }
5904                 }
5905         }
5906
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
	pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		// Sanity-check that neither side's commitment state has advanced: no revocation
		// secrets seen and both commitment numbers still at their initial values.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		// Now that the funding outpoint is known, hand the completed channel parameters to
		// our signer so it can sign the initial commitment transaction below.
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let signature = match self.get_funding_created_signature(logger) {
			Ok(res) => res,
			Err(e) => {
				log_error!(logger, "Got bad signatures: {:?}!", e);
				// Signing failed: roll back the outpoint we just set and return the channel
				// to the caller alongside the error.
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, e));
			}
		};

		// Keep the pre-funding ID around for the message; channel_id is rewritten just below.
		let temporary_channel_id = self.context.channel_id;

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingCreated as u32;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
			self.context.minimum_depth.unwrap_or(0) > 0 &&
			self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		// `Some(())` iff this channel is part of a batch funding transaction.
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		// Consume self: the channel graduates from OutboundV1Channel to a funded Channel.
		let channel = Channel {
			context: self.context,
		};

		Ok((channel, msgs::FundingCreated {
			temporary_channel_id,
			funding_txid: funding_txo.txid,
			funding_output_index: funding_txo.index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		}))
	}
5973
5974         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
5975                 // The default channel type (ie the first one we try) depends on whether the channel is
5976                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
5977                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
5978                 // with no other changes, and fall back to `only_static_remotekey`.
5979                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
5980                 if !config.channel_handshake_config.announced_channel &&
5981                         config.channel_handshake_config.negotiate_scid_privacy &&
5982                         their_features.supports_scid_privacy() {
5983                         ret.set_scid_privacy_required();
5984                 }
5985
5986                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
5987                 // set it now. If they don't understand it, we'll fall back to our default of
5988                 // `only_static_remotekey`.
5989                 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
5990                         their_features.supports_anchors_zero_fee_htlc_tx() {
5991                         ret.set_anchors_zero_fee_htlc_tx_required();
5992                 }
5993
5994                 ret
5995         }
5996
5997         /// If we receive an error message, it may only be a rejection of the channel type we tried,
5998         /// not of our ability to open any channel at all. Thus, on error, we should first call this
5999         /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6000         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6001                 &mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6002         ) -> Result<msgs::OpenChannel, ()>
6003         where
6004                 F::Target: FeeEstimator
6005         {
6006                 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6007                 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6008                         // We've exhausted our options
6009                         return Err(());
6010                 }
6011                 // We support opening a few different types of channels. Try removing our additional
6012                 // features one by one until we've either arrived at our default or the counterparty has
6013                 // accepted one.
6014                 //
6015                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6016                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6017                 // checks whether the counterparty supports every feature, this would only happen if the
6018                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6019                 // whatever reason.
6020                 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6021                         self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6022                         self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
6023                         assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6024                 } else if self.context.channel_type.supports_scid_privacy() {
6025                         self.context.channel_type.clear_scid_privacy();
6026                 } else {
6027                         self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6028                 }
6029                 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6030                 Ok(self.get_open_channel(chain_hash))
6031         }
6032
6033         pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
6034                 if !self.context.is_outbound() {
6035                         panic!("Tried to open a channel for an inbound channel?");
6036                 }
6037                 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6038                         panic!("Cannot generate an open_channel after we've moved forward");
6039                 }
6040
6041                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6042                         panic!("Tried to send an open_channel for a channel that has already advanced");
6043                 }
6044
6045                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6046                 let keys = self.context.get_holder_pubkeys();
6047
6048                 msgs::OpenChannel {
6049                         chain_hash,
6050                         temporary_channel_id: self.context.channel_id,
6051                         funding_satoshis: self.context.channel_value_satoshis,
6052                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6053                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6054                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6055                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6056                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6057                         feerate_per_kw: self.context.feerate_per_kw as u32,
6058                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6059                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6060                         funding_pubkey: keys.funding_pubkey,
6061                         revocation_basepoint: keys.revocation_basepoint,
6062                         payment_point: keys.payment_point,
6063                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6064                         htlc_basepoint: keys.htlc_basepoint,
6065                         first_per_commitment_point,
6066                         channel_flags: if self.context.config.announced_channel {1} else {0},
6067                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6068                                 Some(script) => script.clone().into_inner(),
6069                                 None => Builder::new().into_script(),
6070                         }),
6071                         channel_type: Some(self.context.channel_type.clone()),
6072                 }
6073         }
6074
	// Message handlers

	/// Handles the counterparty's `accept_channel` on an outbound channel: validates the
	/// message against protocol sanity limits and our (possibly per-peer overridden)
	/// handshake limits, then stores the counterparty's parameters in the channel state.
	///
	/// On success the channel moves to the `OurInitSent | TheirInitSent` state. Any
	/// returned error is a `ChannelError::Close`.
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		// A per-channel limits override (if one was set) takes precedence over the defaults.
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if self.context.channel_state != ChannelState::OurInitSent as u32 {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		// 21000000 * 100000000 is the total bitcoin supply in satoshis.
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		// The largest amount a single HTLC could ever carry is the channel value minus
		// their reserve; an htlc_minimum at or above that makes HTLCs impossible.
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		// The channel type they echo back must match what we proposed in open_channel. If
		// they set no explicit type, fall back to a type implied by their init features.
		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		// All checks passed - record the counterparty's negotiated parameters.
		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		// If the user trusts their own 0-conf funding, accept whatever depth the peer asked
		// for (possibly zero); otherwise require at least one confirmation.
		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: msg.revocation_basepoint,
			payment_point: msg.payment_point,
			delayed_payment_basepoint: msg.delayed_payment_basepoint,
			htlc_basepoint: msg.htlc_basepoint
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
6205 }
6206
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// Channel state shared with funded channels (see `ChannelContext`).
	pub context: ChannelContext<SP>,
	// State that only applies while the channel remains unfunded, e.g. its age in timer
	// ticks (`unfunded_channel_age_ticks`).
	pub unfunded_context: UnfundedChannelContext,
}
6212
6213 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6214         /// Creates a new channel from a remote sides' request for one.
6215         /// Assumes chain_hash has already been checked and corresponds with what we expect!
6216         pub fn new<ES: Deref, F: Deref, L: Deref>(
6217                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6218                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6219                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6220                 current_chain_height: u32, logger: &L, is_0conf: bool,
6221         ) -> Result<InboundV1Channel<SP>, ChannelError>
6222                 where ES::Target: EntropySource,
6223                           F::Target: FeeEstimator,
6224                           L::Target: Logger,
6225         {
6226                 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6227
6228                 // First check the channel type is known, failing before we do anything else if we don't
6229                 // support this channel type.
6230                 let channel_type = if let Some(channel_type) = &msg.channel_type {
6231                         if channel_type.supports_any_optional_bits() {
6232                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6233                         }
6234
6235                         // We only support the channel types defined by the `ChannelManager` in
6236                         // `provided_channel_type_features`. The channel type must always support
6237                         // `static_remote_key`.
6238                         if !channel_type.requires_static_remote_key() {
6239                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6240                         }
6241                         // Make sure we support all of the features behind the channel type.
6242                         if !channel_type.is_subset(our_supported_features) {
6243                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6244                         }
6245                         if channel_type.requires_scid_privacy() && announced_channel {
6246                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6247                         }
6248                         channel_type.clone()
6249                 } else {
6250                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6251                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6252                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6253                         }
6254                         channel_type
6255                 };
6256
6257                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6258                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6259                 let pubkeys = holder_signer.pubkeys().clone();
6260                 let counterparty_pubkeys = ChannelPublicKeys {
6261                         funding_pubkey: msg.funding_pubkey,
6262                         revocation_basepoint: msg.revocation_basepoint,
6263                         payment_point: msg.payment_point,
6264                         delayed_payment_basepoint: msg.delayed_payment_basepoint,
6265                         htlc_basepoint: msg.htlc_basepoint
6266                 };
6267
6268                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6269                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6270                 }
6271
6272                 // Check sanity of message fields:
6273                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6274                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6275                 }
6276                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6277                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6278                 }
6279                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6280                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6281                 }
6282                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6283                 if msg.push_msat > full_channel_value_msat {
6284                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6285                 }
6286                 if msg.dust_limit_satoshis > msg.funding_satoshis {
6287                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6288                 }
6289                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6290                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6291                 }
6292                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6293
6294                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6295                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6296                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6297                 }
6298                 if msg.max_accepted_htlcs < 1 {
6299                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6300                 }
6301                 if msg.max_accepted_htlcs > MAX_HTLCS {
6302                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6303                 }
6304
6305                 // Now check against optional parameters as set by config...
6306                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6307                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6308                 }
6309                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6310                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
6311                 }
6312                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6313                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6314                 }
6315                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6316                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6317                 }
6318                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6319                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6320                 }
6321                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6322                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6323                 }
6324                 if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
6325                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6326                 }
6327
6328                 // Convert things into internal flags and prep our state:
6329
6330                 if config.channel_handshake_limits.force_announced_channel_preference {
6331                         if config.channel_handshake_config.announced_channel != announced_channel {
6332                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6333                         }
6334                 }
6335
6336                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6337                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6338                         // Protocol level safety check in place, although it should never happen because
6339                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6340                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6341                 }
6342                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6343                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6344                 }
6345                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6346                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6347                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6348                 }
6349                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6350                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6351                 }
6352
6353                 // check if the funder's amount for the initial commitment tx is sufficient
6354                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6355                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6356                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6357                 if funders_amount_msat / 1000 < commitment_tx_fee {
6358                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
6359                 }
6360
6361                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
6362                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6363                 // want to push much to us), our counterparty should always have more than our reserve.
6364                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6365                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6366                 }
6367
6368                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6369                         match &msg.shutdown_scriptpubkey {
6370                                 &Some(ref script) => {
6371                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6372                                         if script.len() == 0 {
6373                                                 None
6374                                         } else {
6375                                                 if !script::is_bolt2_compliant(&script, their_features) {
6376                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6377                                                 }
6378                                                 Some(script.clone())
6379                                         }
6380                                 },
6381                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6382                                 &None => {
6383                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6384                                 }
6385                         }
6386                 } else { None };
6387
6388                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6389                         match signer_provider.get_shutdown_scriptpubkey() {
6390                                 Ok(scriptpubkey) => Some(scriptpubkey),
6391                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6392                         }
6393                 } else { None };
6394
6395                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6396                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6397                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6398                         }
6399                 }
6400
6401                 let destination_script = match signer_provider.get_destination_script() {
6402                         Ok(script) => script,
6403                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6404                 };
6405
6406                 let mut secp_ctx = Secp256k1::new();
6407                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6408
6409                 let minimum_depth = if is_0conf {
6410                         Some(0)
6411                 } else {
6412                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6413                 };
6414
6415                 let chan = Self {
6416                         context: ChannelContext {
6417                                 user_id,
6418
6419                                 config: LegacyChannelConfig {
6420                                         options: config.channel_config.clone(),
6421                                         announced_channel,
6422                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6423                                 },
6424
6425                                 prev_config: None,
6426
6427                                 inbound_handshake_limits_override: None,
6428
6429                                 temporary_channel_id: Some(msg.temporary_channel_id),
6430                                 channel_id: msg.temporary_channel_id,
6431                                 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6432                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6433                                 secp_ctx,
6434
6435                                 latest_monitor_update_id: 0,
6436
6437                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6438                                 shutdown_scriptpubkey,
6439                                 destination_script,
6440
6441                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6442                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6443                                 value_to_self_msat: msg.push_msat,
6444
6445                                 pending_inbound_htlcs: Vec::new(),
6446                                 pending_outbound_htlcs: Vec::new(),
6447                                 holding_cell_htlc_updates: Vec::new(),
6448                                 pending_update_fee: None,
6449                                 holding_cell_update_fee: None,
6450                                 next_holder_htlc_id: 0,
6451                                 next_counterparty_htlc_id: 0,
6452                                 update_time_counter: 1,
6453
6454                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6455
6456                                 monitor_pending_channel_ready: false,
6457                                 monitor_pending_revoke_and_ack: false,
6458                                 monitor_pending_commitment_signed: false,
6459                                 monitor_pending_forwards: Vec::new(),
6460                                 monitor_pending_failures: Vec::new(),
6461                                 monitor_pending_finalized_fulfills: Vec::new(),
6462
6463                                 #[cfg(debug_assertions)]
6464                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6465                                 #[cfg(debug_assertions)]
6466                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6467
6468                                 last_sent_closing_fee: None,
6469                                 pending_counterparty_closing_signed: None,
6470                                 closing_fee_limits: None,
6471                                 target_closing_feerate_sats_per_kw: None,
6472
6473                                 funding_tx_confirmed_in: None,
6474                                 funding_tx_confirmation_height: 0,
6475                                 short_channel_id: None,
6476                                 channel_creation_height: current_chain_height,
6477
6478                                 feerate_per_kw: msg.feerate_per_kw,
6479                                 channel_value_satoshis: msg.funding_satoshis,
6480                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6481                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6482                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6483                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6484                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6485                                 holder_selected_channel_reserve_satoshis,
6486                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6487                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6488                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6489                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6490                                 minimum_depth,
6491
6492                                 counterparty_forwarding_info: None,
6493
6494                                 channel_transaction_parameters: ChannelTransactionParameters {
6495                                         holder_pubkeys: pubkeys,
6496                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6497                                         is_outbound_from_holder: false,
6498                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6499                                                 selected_contest_delay: msg.to_self_delay,
6500                                                 pubkeys: counterparty_pubkeys,
6501                                         }),
6502                                         funding_outpoint: None,
6503                                         channel_type_features: channel_type.clone()
6504                                 },
6505                                 funding_transaction: None,
6506                                 is_batch_funding: None,
6507
6508                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6509                                 counterparty_prev_commitment_point: None,
6510                                 counterparty_node_id,
6511
6512                                 counterparty_shutdown_scriptpubkey,
6513
6514                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6515
6516                                 channel_update_status: ChannelUpdateStatus::Enabled,
6517                                 closing_signed_in_flight: false,
6518
6519                                 announcement_sigs: None,
6520
6521                                 #[cfg(any(test, fuzzing))]
6522                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6523                                 #[cfg(any(test, fuzzing))]
6524                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6525
6526                                 workaround_lnd_bug_4006: None,
6527                                 sent_message_awaiting_response: None,
6528
6529                                 latest_inbound_scid_alias: None,
6530                                 outbound_scid_alias: 0,
6531
6532                                 channel_pending_event_emitted: false,
6533                                 channel_ready_event_emitted: false,
6534
6535                                 #[cfg(any(test, fuzzing))]
6536                                 historical_inbound_htlc_fulfills: HashSet::new(),
6537
6538                                 channel_type,
6539                                 channel_keys_id,
6540
6541                                 blocked_monitor_updates: Vec::new(),
6542                         },
6543                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6544                 };
6545
6546                 Ok(chan)
6547         }
6548
6549         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6550         /// should be sent back to the counterparty node.
6551         ///
6552         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6553         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6554                 if self.context.is_outbound() {
6555                         panic!("Tried to send accept_channel for an outbound channel?");
6556                 }
6557                 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6558                         panic!("Tried to send accept_channel after channel had moved forward");
6559                 }
6560                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6561                         panic!("Tried to send an accept_channel for a channel that has already advanced");
6562                 }
6563
6564                 self.generate_accept_channel_message()
6565         }
6566
6567         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6568         /// inbound channel. If the intention is to accept an inbound channel, use
6569         /// [`InboundV1Channel::accept_inbound_channel`] instead.
6570         ///
6571         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6572         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6573                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6574                 let keys = self.context.get_holder_pubkeys();
6575
6576                 msgs::AcceptChannel {
6577                         temporary_channel_id: self.context.channel_id,
6578                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6579                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6580                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6581                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6582                         minimum_depth: self.context.minimum_depth.unwrap(),
6583                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6584                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6585                         funding_pubkey: keys.funding_pubkey,
6586                         revocation_basepoint: keys.revocation_basepoint,
6587                         payment_point: keys.payment_point,
6588                         delayed_payment_basepoint: keys.delayed_payment_basepoint,
6589                         htlc_basepoint: keys.htlc_basepoint,
6590                         first_per_commitment_point,
6591                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6592                                 Some(script) => script.clone().into_inner(),
6593                                 None => Builder::new().into_script(),
6594                         }),
6595                         channel_type: Some(self.context.channel_type.clone()),
6596                         #[cfg(taproot)]
6597                         next_local_nonce: None,
6598                 }
6599         }
6600
6601         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6602         /// inbound channel without accepting it.
6603         ///
6604         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6605         #[cfg(test)]
6606         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6607                 self.generate_accept_channel_message()
6608         }
6609
	/// Verifies the counterparty's `funding_created` signature against our initial holder
	/// commitment transaction, then produces our signature on *their* initial commitment
	/// transaction.
	///
	/// Returns `(counterparty_initial_commitment_tx, holder_initial_commitment_tx,
	/// our_signature_on_counterparty_tx)`, or `ChannelError::Close` if the peer's signature is
	/// invalid or our signer fails to sign.
	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		// Build *our* initial commitment transaction, which is what the peer's signature covers.
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign the holder commitment transaction...
			log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
				log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
				encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
				encode::serialize_hex(&funding_script), &self.context.channel_id());
			// secp_check! converts an invalid-signature error into a ChannelError::Close return.
			secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
		}

		// Now build the counterparty's initial commitment transaction so we can sign it for them.
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;

		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		match &self.context.holder_signer {
			// TODO (arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				// No HTLCs exist yet at funding time, hence the empty Vec of HTLC signatures.
				let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;

				// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
				Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
			}
		}
	}
6646
	/// Handles an incoming `funding_created` message, verifying the counterparty's commitment
	/// signature and, on success, promoting this unfunded inbound channel into a full
	/// [`Channel`] along with the `funding_signed` reply and the initial [`ChannelMonitor`] the
	/// caller must persist.
	///
	/// On failure, returns `self` back (so the caller can keep or discard the unfunded channel)
	/// together with the `ChannelError`.
	pub fn funding_created<L: Deref>(
		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
	where
		L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
			// remember the channel, so it's safe to just send an error_message here and drop the
			// channel.
			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
		}
		// At this point no commitment updates can have happened; anything else indicates an
		// internal logic error, not a peer protocol error.
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		// This is an externally observable change before we finish all our checks.  In particular
		// funding_created_signature may fail.
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
			Ok(res) => res,
			Err(ChannelError::Close(e)) => {
				// Roll back the externally observable funding outpoint set above before bailing.
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, ChannelError::Close(e)));
			},
			Err(e) => {
				// The only error we know how to handle is ChannelError::Close, so we fall over here
				// to make sure we don't continue with an inconsistent state.
				panic!("unexpected error type from funding_created_signature {:?}", e);
			}
		};

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		// Give our signer a chance to reject the commitment (e.g. an external signer enforcing
		// its own policy) before we commit any state.
		if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		// Now that we're past error-generating stuff, update our local state:

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		// The monitor gets its own signer instance, derived from the same channel_keys_id.
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		                                          shutdown_script, self.context.get_holder_selected_contest_delay(),
		                                          &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
		                                          &self.context.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);

		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		// The channel_id switches from the temporary id to the funding-outpoint-derived id here.
		self.context.channel_state = ChannelState::FundingSent as u32;
		self.context.channel_id = funding_txo.to_channel_id();
		self.context.cur_counterparty_commitment_transaction_number -= 1;
		self.context.cur_holder_commitment_transaction_number -= 1;

		log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());

		// Promote the channel to a full-fledged one now that we have updated the state and have a
		// `ChannelMonitor`.
		let mut channel = Channel {
			context: self.context,
		};
		let channel_id = channel.context.channel_id.clone();
		// With minimum_depth == 0 (0conf), the channel may be ready immediately.
		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());

		Ok((channel, msgs::FundingSigned {
			channel_id,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
		}, channel_monitor))
	}
6745 }
6746
// Current `Channel` serialization format version, passed to `read_ver_prefix!` when reading.
const SERIALIZATION_VERSION: u8 = 3;
// Oldest format version we still support. Note that the `Writeable` impl below writes this
// value (not `SERIALIZATION_VERSION`) as the version prefix, via `write_ver_prefix!`.
const MIN_SERIALIZATION_VERSION: u8 = 2;
6749
// Maps each `InboundHTLCRemovalReason` variant to a stable numeric tag for (de)serialization.
impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);
6755
6756 impl Writeable for ChannelUpdateStatus {
6757         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6758                 // We only care about writing out the current state as it was announced, ie only either
6759                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6760                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6761                 match self {
6762                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6763                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6764                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6765                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6766                 }
6767                 Ok(())
6768         }
6769 }
6770
6771 impl Readable for ChannelUpdateStatus {
6772         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6773                 Ok(match <u8 as Readable>::read(reader)? {
6774                         0 => ChannelUpdateStatus::Enabled,
6775                         1 => ChannelUpdateStatus::Disabled,
6776                         _ => return Err(DecodeError::InvalidValue),
6777                 })
6778         }
6779 }
6780
6781 impl Writeable for AnnouncementSigsState {
6782         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6783                 // We only care about writing out the current state as if we had just disconnected, at
6784                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6785                 match self {
6786                         AnnouncementSigsState::NotSent => 0u8.write(writer),
6787                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
6788                         AnnouncementSigsState::Committed => 0u8.write(writer),
6789                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6790                 }
6791         }
6792 }
6793
6794 impl Readable for AnnouncementSigsState {
6795         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6796                 Ok(match <u8 as Readable>::read(reader)? {
6797                         0 => AnnouncementSigsState::NotSent,
6798                         1 => AnnouncementSigsState::PeerReceived,
6799                         _ => return Err(DecodeError::InvalidValue),
6800                 })
6801         }
6802 }
6803
impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
		// called.

		write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
		// the low bytes now and the optional high bytes later.
		let user_id_low = self.context.user_id as u64;
		user_id_low.write(writer)?;

		// Version 1 deserializers expected to read parts of the config object here. Version 2
		// deserializers (0.0.99) now read config through TLVs, and as we now require them for
		// `minimum_depth` we simply write dummy values here.
		writer.write_all(&[0; 8])?;

		self.context.channel_id.write(writer)?;
		// Serialize the state with the PeerDisconnected flag set, consistent with the "as if
		// remove_uncommitted_htlcs_and_mark_paused had just been called" note above.
		(self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
		self.context.channel_value_satoshis.write(writer)?;

		self.context.latest_monitor_update_id.write(writer)?;

		// The holder signer is serialized as a length-prefixed opaque blob.
		let mut key_data = VecWriter(Vec::new());
		// TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
		self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
		// deserialized from that format.
		match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
			Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
			None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
		}
		self.context.destination_script.write(writer)?;

		self.context.cur_holder_commitment_transaction_number.write(writer)?;
		self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
		self.context.value_to_self_msat.write(writer)?;

		// Inbound HTLCs still in RemoteAnnounced (announced but never committed) are dropped
		// from the serialized form entirely; `next_counterparty_htlc_id` is adjusted below to
		// stay consistent with that.
		let mut dropped_inbound_htlcs = 0;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				dropped_inbound_htlcs += 1;
			}
		}
		(self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
				continue; // Drop
			}
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			match &htlc.state {
				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
					1u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
					2u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::Committed => {
					3u8.write(writer)?;
				},
				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
					4u8.write(writer)?;
					removal_reason.write(writer)?;
				},
			}
		}

		// Preimages for outbound HTLCs resolved successfully but whose removal is not yet fully
		// revoked are collected here and written in TLV 15 below. Skimmed fees postdate the
		// fixed-format fields, so they go into a parallel Vec written in TLV 35.
		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
		let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();

		(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
		for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			htlc.source.write(writer)?;
			match &htlc.state {
				&OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
					0u8.write(writer)?;
					onion_packet.write(writer)?;
				},
				&OutboundHTLCState::Committed => {
					1u8.write(writer)?;
				},
				&OutboundHTLCState::RemoteRemoved(_) => {
					// Treat this as a Committed because we haven't received the CS - they'll
					// resend the claim/fail on reconnect as well as (hopefully) the missing CS.
					1u8.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
					3u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
				&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
					4u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
			}
			// The skimmed-fee Vec is only materialized once the first HTLC with a skimmed fee
			// is seen; earlier entries are back-filled with None so indices stay aligned.
			if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
				if pending_outbound_skimmed_fees.is_empty() {
					for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
				}
				pending_outbound_skimmed_fees.push(Some(skimmed_fee));
			} else if !pending_outbound_skimmed_fees.is_empty() {
				pending_outbound_skimmed_fees.push(None);
			}
		}

		let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
		for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
			match update {
				&HTLCUpdateAwaitingACK::AddHTLC {
					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
					skimmed_fee_msat,
				} => {
					0u8.write(writer)?;
					amount_msat.write(writer)?;
					cltv_expiry.write(writer)?;
					payment_hash.write(writer)?;
					source.write(writer)?;
					onion_routing_packet.write(writer)?;

					// Same lazy materialization as pending_outbound_skimmed_fees above; this
					// Vec is written in TLV 37.
					if let Some(skimmed_fee) = skimmed_fee_msat {
						if holding_cell_skimmed_fees.is_empty() {
							for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
						}
						holding_cell_skimmed_fees.push(Some(skimmed_fee));
					} else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
					1u8.write(writer)?;
					payment_preimage.write(writer)?;
					htlc_id.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					err_packet.write(writer)?;
				}
			}
		}

		match self.context.resend_order {
			RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
			RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
		}

		self.context.monitor_pending_channel_ready.write(writer)?;
		self.context.monitor_pending_revoke_and_ack.write(writer)?;
		self.context.monitor_pending_commitment_signed.write(writer)?;

		(self.context.monitor_pending_forwards.len() as u64).write(writer)?;
		for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
			pending_forward.write(writer)?;
			htlc_id.write(writer)?;
		}

		(self.context.monitor_pending_failures.len() as u64).write(writer)?;
		for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
			htlc_source.write(writer)?;
			payment_hash.write(writer)?;
			fail_reason.write(writer)?;
		}

		if self.context.is_outbound() {
			self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
		} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
			Some(feerate).write(writer)?;
		} else {
			// As for inbound HTLCs, if the update was only announced and never committed in a
			// commitment_signed, drop it.
			None::<u32>.write(writer)?;
		}
		self.context.holding_cell_update_fee.write(writer)?;

		self.context.next_holder_htlc_id.write(writer)?;
		// Keep the counter consistent with the RemoteAnnounced HTLCs we dropped above.
		(self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
		self.context.update_time_counter.write(writer)?;
		self.context.feerate_per_kw.write(writer)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		0u8.write(writer)?;

		self.context.funding_tx_confirmed_in.write(writer)?;
		self.context.funding_tx_confirmation_height.write(writer)?;
		self.context.short_channel_id.write(writer)?;

		self.context.counterparty_dust_limit_satoshis.write(writer)?;
		self.context.holder_dust_limit_satoshis.write(writer)?;
		self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

		self.context.counterparty_htlc_minimum_msat.write(writer)?;
		self.context.holder_htlc_minimum_msat.write(writer)?;
		self.context.counterparty_max_accepted_htlcs.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.minimum_depth.unwrap_or(0).write(writer)?;

		match &self.context.counterparty_forwarding_info {
			Some(info) => {
				1u8.write(writer)?;
				info.fee_base_msat.write(writer)?;
				info.fee_proportional_millionths.write(writer)?;
				info.cltv_expiry_delta.write(writer)?;
			},
			None => 0u8.write(writer)?
		}

		self.context.channel_transaction_parameters.write(writer)?;
		self.context.funding_transaction.write(writer)?;

		self.context.counterparty_cur_commitment_point.write(writer)?;
		self.context.counterparty_prev_commitment_point.write(writer)?;
		self.context.counterparty_node_id.write(writer)?;

		self.context.counterparty_shutdown_scriptpubkey.write(writer)?;

		self.context.commitment_secrets.write(writer)?;

		self.context.channel_update_status.write(writer)?;

		#[cfg(any(test, fuzzing))]
		(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
		#[cfg(any(test, fuzzing))]
		for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
			htlc.write(writer)?;
		}

		// If the channel type is something other than only-static-remote-key, then we need to have
		// older clients fail to deserialize this channel at all. If the type is
		// only-static-remote-key, we simply consider it "default" and don't write the channel type
		// out at all.
		let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
			Some(&self.context.channel_type) } else { None };

		// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value then 10%, which older versions of LDK used
		// to set it to before the percentage was made configurable.
		let serialized_holder_selected_reserve =
			if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
			{ Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };

		let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
		old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
		let serialized_holder_htlc_max_in_flight =
			if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
			{ Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };

		let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
		let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
		// we write the high bytes as an option here.
		let user_id_high_opt = Some((self.context.user_id >> 64) as u64);

		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };

		write_tlv_fields!(writer, {
			(0, self.context.announcement_sigs, option),
			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
			// default value instead of being Option<>al. Thus, to maintain compatibility we write
			// them twice, once with their original default values above, and once as an option
			// here. On the read side, old versions will simply ignore the odd-type entries here,
			// and new versions map the default values to None and allow the TLV entries here to
			// override that.
			(1, self.context.minimum_depth, option),
			(2, chan_type, option),
			(3, self.context.counterparty_selected_channel_reserve_satoshis, option),
			(4, serialized_holder_selected_reserve, option),
			(5, self.context.config, required),
			(6, serialized_holder_htlc_max_in_flight, option),
			(7, self.context.shutdown_scriptpubkey, option),
			(8, self.context.blocked_monitor_updates, optional_vec),
			(9, self.context.target_closing_feerate_sats_per_kw, option),
			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
			(13, self.context.channel_creation_height, required),
			(15, preimages, required_vec),
			(17, self.context.announcement_sigs_state, required),
			(19, self.context.latest_inbound_scid_alias, option),
			(21, self.context.outbound_scid_alias, required),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, self.context.channel_keys_id, required),
			(28, holder_max_accepted_htlcs, option),
			(29, self.context.temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees, optional_vec),
			(37, holding_cell_skimmed_fees, optional_vec),
			(38, self.context.is_batch_funding, option),
		});

		Ok(())
	}
}
7128
// Upper bound on any single up-front buffer allocation during deserialization, so a corrupted
// length prefix cannot trigger a huge allocation (data is then read in smaller chunks).
const MAX_ALLOC_SIZE: usize = 64*1024;
7130 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7131                 where
7132                         ES::Target: EntropySource,
7133                         SP::Target: SignerProvider
7134 {
7135         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7136                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7137                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7138
7139                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7140                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7141                 // the low bytes now and the high bytes later.
7142                 let user_id_low: u64 = Readable::read(reader)?;
7143
7144                 let mut config = Some(LegacyChannelConfig::default());
7145                 if ver == 1 {
7146                         // Read the old serialization of the ChannelConfig from version 0.0.98.
7147                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7148                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7149                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7150                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7151                 } else {
7152                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7153                         let mut _val: u64 = Readable::read(reader)?;
7154                 }
7155
7156                 let channel_id = Readable::read(reader)?;
7157                 let channel_state = Readable::read(reader)?;
7158                 let channel_value_satoshis = Readable::read(reader)?;
7159
7160                 let latest_monitor_update_id = Readable::read(reader)?;
7161
7162                 let mut keys_data = None;
7163                 if ver <= 2 {
7164                         // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7165                         // the `channel_keys_id` TLV is present below.
7166                         let keys_len: u32 = Readable::read(reader)?;
7167                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7168                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
7169                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7170                                 let mut data = [0; 1024];
7171                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7172                                 reader.read_exact(read_slice)?;
7173                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7174                         }
7175                 }
7176
7177                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7178                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7179                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7180                         Err(_) => None,
7181                 };
7182                 let destination_script = Readable::read(reader)?;
7183
7184                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7185                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7186                 let value_to_self_msat = Readable::read(reader)?;
7187
7188                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7189
7190                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7191                 for _ in 0..pending_inbound_htlc_count {
7192                         pending_inbound_htlcs.push(InboundHTLCOutput {
7193                                 htlc_id: Readable::read(reader)?,
7194                                 amount_msat: Readable::read(reader)?,
7195                                 cltv_expiry: Readable::read(reader)?,
7196                                 payment_hash: Readable::read(reader)?,
7197                                 state: match <u8 as Readable>::read(reader)? {
7198                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7199                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7200                                         3 => InboundHTLCState::Committed,
7201                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7202                                         _ => return Err(DecodeError::InvalidValue),
7203                                 },
7204                         });
7205                 }
7206
7207                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7208                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7209                 for _ in 0..pending_outbound_htlc_count {
7210                         pending_outbound_htlcs.push(OutboundHTLCOutput {
7211                                 htlc_id: Readable::read(reader)?,
7212                                 amount_msat: Readable::read(reader)?,
7213                                 cltv_expiry: Readable::read(reader)?,
7214                                 payment_hash: Readable::read(reader)?,
7215                                 source: Readable::read(reader)?,
7216                                 state: match <u8 as Readable>::read(reader)? {
7217                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7218                                         1 => OutboundHTLCState::Committed,
7219                                         2 => {
7220                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7221                                                 OutboundHTLCState::RemoteRemoved(option.into())
7222                                         },
7223                                         3 => {
7224                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7225                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7226                                         },
7227                                         4 => {
7228                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7229                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7230                                         },
7231                                         _ => return Err(DecodeError::InvalidValue),
7232                                 },
7233                                 skimmed_fee_msat: None,
7234                         });
7235                 }
7236
7237                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7238                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7239                 for _ in 0..holding_cell_htlc_update_count {
7240                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7241                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
7242                                         amount_msat: Readable::read(reader)?,
7243                                         cltv_expiry: Readable::read(reader)?,
7244                                         payment_hash: Readable::read(reader)?,
7245                                         source: Readable::read(reader)?,
7246                                         onion_routing_packet: Readable::read(reader)?,
7247                                         skimmed_fee_msat: None,
7248                                 },
7249                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7250                                         payment_preimage: Readable::read(reader)?,
7251                                         htlc_id: Readable::read(reader)?,
7252                                 },
7253                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
7254                                         htlc_id: Readable::read(reader)?,
7255                                         err_packet: Readable::read(reader)?,
7256                                 },
7257                                 _ => return Err(DecodeError::InvalidValue),
7258                         });
7259                 }
7260
7261                 let resend_order = match <u8 as Readable>::read(reader)? {
7262                         0 => RAACommitmentOrder::CommitmentFirst,
7263                         1 => RAACommitmentOrder::RevokeAndACKFirst,
7264                         _ => return Err(DecodeError::InvalidValue),
7265                 };
7266
7267                 let monitor_pending_channel_ready = Readable::read(reader)?;
7268                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7269                 let monitor_pending_commitment_signed = Readable::read(reader)?;
7270
7271                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7272                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7273                 for _ in 0..monitor_pending_forwards_count {
7274                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7275                 }
7276
7277                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7278                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7279                 for _ in 0..monitor_pending_failures_count {
7280                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7281                 }
7282
7283                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7284
7285                 let holding_cell_update_fee = Readable::read(reader)?;
7286
7287                 let next_holder_htlc_id = Readable::read(reader)?;
7288                 let next_counterparty_htlc_id = Readable::read(reader)?;
7289                 let update_time_counter = Readable::read(reader)?;
7290                 let feerate_per_kw = Readable::read(reader)?;
7291
7292                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7293                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7294                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7295                 // consider the stale state on reload.
7296                 match <u8 as Readable>::read(reader)? {
7297                         0 => {},
7298                         1 => {
7299                                 let _: u32 = Readable::read(reader)?;
7300                                 let _: u64 = Readable::read(reader)?;
7301                                 let _: Signature = Readable::read(reader)?;
7302                         },
7303                         _ => return Err(DecodeError::InvalidValue),
7304                 }
7305
7306                 let funding_tx_confirmed_in = Readable::read(reader)?;
7307                 let funding_tx_confirmation_height = Readable::read(reader)?;
7308                 let short_channel_id = Readable::read(reader)?;
7309
7310                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7311                 let holder_dust_limit_satoshis = Readable::read(reader)?;
7312                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7313                 let mut counterparty_selected_channel_reserve_satoshis = None;
7314                 if ver == 1 {
7315                         // Read the old serialization from version 0.0.98.
7316                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7317                 } else {
7318                         // Read the 8 bytes of backwards-compatibility data.
7319                         let _dummy: u64 = Readable::read(reader)?;
7320                 }
7321                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7322                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7323                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7324
7325                 let mut minimum_depth = None;
7326                 if ver == 1 {
7327                         // Read the old serialization from version 0.0.98.
7328                         minimum_depth = Some(Readable::read(reader)?);
7329                 } else {
7330                         // Read the 4 bytes of backwards-compatibility data.
7331                         let _dummy: u32 = Readable::read(reader)?;
7332                 }
7333
7334                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7335                         0 => None,
7336                         1 => Some(CounterpartyForwardingInfo {
7337                                 fee_base_msat: Readable::read(reader)?,
7338                                 fee_proportional_millionths: Readable::read(reader)?,
7339                                 cltv_expiry_delta: Readable::read(reader)?,
7340                         }),
7341                         _ => return Err(DecodeError::InvalidValue),
7342                 };
7343
7344                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7345                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7346
7347                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7348
7349                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7350                 let counterparty_node_id = Readable::read(reader)?;
7351
7352                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7353                 let commitment_secrets = Readable::read(reader)?;
7354
7355                 let channel_update_status = Readable::read(reader)?;
7356
7357                 #[cfg(any(test, fuzzing))]
7358                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7359                 #[cfg(any(test, fuzzing))]
7360                 {
7361                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7362                         for _ in 0..htlc_fulfills_len {
7363                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7364                         }
7365                 }
7366
7367                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7368                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7369                                 FeeUpdateState::Outbound
7370                         } else {
7371                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7372                         }))
7373                 } else {
7374                         None
7375                 };
7376
7377                 let mut announcement_sigs = None;
7378                 let mut target_closing_feerate_sats_per_kw = None;
7379                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7380                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7381                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7382                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7383                 // only, so we default to that if none was written.
7384                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7385                 let mut channel_creation_height = Some(serialized_height);
7386                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7387
7388                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7389                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7390                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7391                 let mut latest_inbound_scid_alias = None;
7392                 let mut outbound_scid_alias = None;
7393                 let mut channel_pending_event_emitted = None;
7394                 let mut channel_ready_event_emitted = None;
7395
7396                 let mut user_id_high_opt: Option<u64> = None;
7397                 let mut channel_keys_id: Option<[u8; 32]> = None;
7398                 let mut temporary_channel_id: Option<ChannelId> = None;
7399                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7400
7401                 let mut blocked_monitor_updates = Some(Vec::new());
7402
7403                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7404                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7405
7406                 let mut is_batch_funding: Option<()> = None;
7407
7408                 read_tlv_fields!(reader, {
7409                         (0, announcement_sigs, option),
7410                         (1, minimum_depth, option),
7411                         (2, channel_type, option),
7412                         (3, counterparty_selected_channel_reserve_satoshis, option),
7413                         (4, holder_selected_channel_reserve_satoshis, option),
7414                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7415                         (6, holder_max_htlc_value_in_flight_msat, option),
7416                         (7, shutdown_scriptpubkey, option),
7417                         (8, blocked_monitor_updates, optional_vec),
7418                         (9, target_closing_feerate_sats_per_kw, option),
7419                         (11, monitor_pending_finalized_fulfills, optional_vec),
7420                         (13, channel_creation_height, option),
7421                         (15, preimages_opt, optional_vec),
7422                         (17, announcement_sigs_state, option),
7423                         (19, latest_inbound_scid_alias, option),
7424                         (21, outbound_scid_alias, option),
7425                         (23, channel_ready_event_emitted, option),
7426                         (25, user_id_high_opt, option),
7427                         (27, channel_keys_id, option),
7428                         (28, holder_max_accepted_htlcs, option),
7429                         (29, temporary_channel_id, option),
7430                         (31, channel_pending_event_emitted, option),
7431                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
7432                         (37, holding_cell_skimmed_fees_opt, optional_vec),
7433                         (38, is_batch_funding, option),
7434                 });
7435
7436                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7437                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7438                         // If we've gotten to the funding stage of the channel, populate the signer with its
7439                         // required channel parameters.
7440                         let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7441                         if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7442                                 holder_signer.provide_channel_parameters(&channel_parameters);
7443                         }
7444                         (channel_keys_id, holder_signer)
7445                 } else {
7446                         // `keys_data` can be `None` if we had corrupted data.
7447                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7448                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7449                         (holder_signer.channel_keys_id(), holder_signer)
7450                 };
7451
7452                 if let Some(preimages) = preimages_opt {
7453                         let mut iter = preimages.into_iter();
7454                         for htlc in pending_outbound_htlcs.iter_mut() {
7455                                 match &htlc.state {
7456                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7457                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7458                                         }
7459                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7460                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7461                                         }
7462                                         _ => {}
7463                                 }
7464                         }
7465                         // We expect all preimages to be consumed above
7466                         if iter.next().is_some() {
7467                                 return Err(DecodeError::InvalidValue);
7468                         }
7469                 }
7470
7471                 let chan_features = channel_type.as_ref().unwrap();
7472                 if !chan_features.is_subset(our_supported_features) {
7473                         // If the channel was written by a new version and negotiated with features we don't
7474                         // understand yet, refuse to read it.
7475                         return Err(DecodeError::UnknownRequiredFeature);
7476                 }
7477
7478                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7479                 // To account for that, we're proactively setting/overriding the field here.
7480                 channel_parameters.channel_type_features = chan_features.clone();
7481
7482                 let mut secp_ctx = Secp256k1::new();
7483                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7484
7485                 // `user_id` used to be a single u64 value. In order to remain backwards
7486                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7487                 // separate u64 values.
7488                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7489
7490                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7491
7492                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7493                         let mut iter = skimmed_fees.into_iter();
7494                         for htlc in pending_outbound_htlcs.iter_mut() {
7495                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7496                         }
7497                         // We expect all skimmed fees to be consumed above
7498                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7499                 }
7500                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7501                         let mut iter = skimmed_fees.into_iter();
7502                         for htlc in holding_cell_htlc_updates.iter_mut() {
7503                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7504                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7505                                 }
7506                         }
7507                         // We expect all skimmed fees to be consumed above
7508                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7509                 }
7510
7511                 Ok(Channel {
7512                         context: ChannelContext {
7513                                 user_id,
7514
7515                                 config: config.unwrap(),
7516
7517                                 prev_config: None,
7518
7519                                 // Note that we don't care about serializing handshake limits as we only ever serialize
7520                                 // channel data after the handshake has completed.
7521                                 inbound_handshake_limits_override: None,
7522
7523                                 channel_id,
7524                                 temporary_channel_id,
7525                                 channel_state,
7526                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
7527                                 secp_ctx,
7528                                 channel_value_satoshis,
7529
7530                                 latest_monitor_update_id,
7531
7532                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7533                                 shutdown_scriptpubkey,
7534                                 destination_script,
7535
7536                                 cur_holder_commitment_transaction_number,
7537                                 cur_counterparty_commitment_transaction_number,
7538                                 value_to_self_msat,
7539
7540                                 holder_max_accepted_htlcs,
7541                                 pending_inbound_htlcs,
7542                                 pending_outbound_htlcs,
7543                                 holding_cell_htlc_updates,
7544
7545                                 resend_order,
7546
7547                                 monitor_pending_channel_ready,
7548                                 monitor_pending_revoke_and_ack,
7549                                 monitor_pending_commitment_signed,
7550                                 monitor_pending_forwards,
7551                                 monitor_pending_failures,
7552                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7553
7554                                 pending_update_fee,
7555                                 holding_cell_update_fee,
7556                                 next_holder_htlc_id,
7557                                 next_counterparty_htlc_id,
7558                                 update_time_counter,
7559                                 feerate_per_kw,
7560
7561                                 #[cfg(debug_assertions)]
7562                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7563                                 #[cfg(debug_assertions)]
7564                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7565
7566                                 last_sent_closing_fee: None,
7567                                 pending_counterparty_closing_signed: None,
7568                                 closing_fee_limits: None,
7569                                 target_closing_feerate_sats_per_kw,
7570
7571                                 funding_tx_confirmed_in,
7572                                 funding_tx_confirmation_height,
7573                                 short_channel_id,
7574                                 channel_creation_height: channel_creation_height.unwrap(),
7575
7576                                 counterparty_dust_limit_satoshis,
7577                                 holder_dust_limit_satoshis,
7578                                 counterparty_max_htlc_value_in_flight_msat,
7579                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7580                                 counterparty_selected_channel_reserve_satoshis,
7581                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7582                                 counterparty_htlc_minimum_msat,
7583                                 holder_htlc_minimum_msat,
7584                                 counterparty_max_accepted_htlcs,
7585                                 minimum_depth,
7586
7587                                 counterparty_forwarding_info,
7588
7589                                 channel_transaction_parameters: channel_parameters,
7590                                 funding_transaction,
7591                                 is_batch_funding,
7592
7593                                 counterparty_cur_commitment_point,
7594                                 counterparty_prev_commitment_point,
7595                                 counterparty_node_id,
7596
7597                                 counterparty_shutdown_scriptpubkey,
7598
7599                                 commitment_secrets,
7600
7601                                 channel_update_status,
7602                                 closing_signed_in_flight: false,
7603
7604                                 announcement_sigs,
7605
7606                                 #[cfg(any(test, fuzzing))]
7607                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7608                                 #[cfg(any(test, fuzzing))]
7609                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7610
7611                                 workaround_lnd_bug_4006: None,
7612                                 sent_message_awaiting_response: None,
7613
7614                                 latest_inbound_scid_alias,
7615                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7616                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7617
7618                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7619                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7620
7621                                 #[cfg(any(test, fuzzing))]
7622                                 historical_inbound_htlc_fulfills,
7623
7624                                 channel_type: channel_type.unwrap(),
7625                                 channel_keys_id,
7626
7627                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7628                         }
7629                 })
7630         }
7631 }
7632
7633 #[cfg(test)]
7634 mod tests {
7635         use std::cmp;
7636         use bitcoin::blockdata::script::{Script, Builder};
7637         use bitcoin::blockdata::transaction::{Transaction, TxOut};
7638         use bitcoin::blockdata::constants::genesis_block;
7639         use bitcoin::blockdata::opcodes;
7640         use bitcoin::network::constants::Network;
7641         use hex;
7642         use crate::ln::PaymentHash;
7643         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7644         use crate::ln::channel::InitFeatures;
7645         use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7646         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7647         use crate::ln::features::ChannelTypeFeatures;
7648         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7649         use crate::ln::script::ShutdownScript;
7650         use crate::ln::chan_utils;
7651         use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7652         use crate::chain::BestBlock;
7653         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7654         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7655         use crate::chain::transaction::OutPoint;
7656         use crate::routing::router::Path;
7657         use crate::util::config::UserConfig;
7658         use crate::util::errors::APIError;
7659         use crate::util::test_utils;
7660         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7661         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7662         use bitcoin::secp256k1::ffi::Signature as FFISignature;
7663         use bitcoin::secp256k1::{SecretKey,PublicKey};
7664         use bitcoin::hashes::sha256::Hash as Sha256;
7665         use bitcoin::hashes::Hash;
7666         use bitcoin::hash_types::WPubkeyHash;
7667         use bitcoin::PackedLockTime;
7668         use bitcoin::util::address::WitnessVersion;
7669         use crate::prelude::*;
7670
	/// A trivial [`FeeEstimator`] for tests which returns the same fixed feerate
	/// for every confirmation target.
	struct TestFeeEstimator {
		// The feerate (per 1000 weight units) returned by `get_est_sat_per_1000_weight`.
		fee_est: u32
	}
	impl FeeEstimator for TestFeeEstimator {
		// Ignores the confirmation target and always returns the configured feerate.
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
			self.fee_est
		}
	}
7679
7680         #[test]
7681         fn test_max_funding_satoshis_no_wumbo() {
7682                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7683                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7684                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7685         }
7686
7687         #[test]
7688         fn test_no_fee_check_overflow() {
7689                 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7690                 // arithmetic, causing a panic with debug assertions enabled.
7691                 let fee_est = TestFeeEstimator { fee_est: 42 };
7692                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7693                 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
7694                         &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
7695                         u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
7696         }
7697
	/// Test keys provider backed by a single in-memory signer, used below as both a
	/// [`SignerProvider`] and an [`EntropySource`].
	struct Keys {
		// The one signer handed out for every channel in these tests.
		signer: InMemorySigner,
	}
7701
	impl EntropySource for Keys {
		// Returns a fixed all-zero value so test output is deterministic.
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	}
7705
7706         impl SignerProvider for Keys {
7707                 type Signer = InMemorySigner;
7708
7709                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7710                         self.signer.channel_keys_id()
7711                 }
7712
7713                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7714                         self.signer.clone()
7715                 }
7716
7717                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
7718
7719                 fn get_destination_script(&self) -> Result<Script, ()> {
7720                         let secp_ctx = Secp256k1::signing_only();
7721                         let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7722                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7723                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
7724                 }
7725
7726                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7727                         let secp_ctx = Secp256k1::signing_only();
7728                         let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7729                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7730                 }
7731         }
7732
7733         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7734         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7735                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
7736         }
7737
	#[test]
	fn upfront_shutdown_script_incompatibility() {
		// If the peer's features have `shutdown_anysegwit` cleared, opening a channel
		// whose upfront shutdown script uses a non-v0 segwit version must fail with
		// `APIError::IncompatibleShutdownScript`.
		let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
		let non_v0_segwit_shutdown_script =
			ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();

		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		// Force the keys interface to hand out the incompatible (v16) script.
		keys_provider.expect(OnGetShutdownScriptpubkey {
			returns: non_v0_segwit_shutdown_script.clone(),
		});

		let secp_ctx = Secp256k1::new();
		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
			Err(APIError::IncompatibleShutdownScript { script }) => {
				// The error must carry the exact script that was rejected.
				assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
			},
			Err(e) => panic!("Unexpected error: {:?}", e),
			Ok(_) => panic!("Expected error"),
		}
	}
7762
7763         // Check that, during channel creation, we use the same feerate in the open channel message
7764         // as we do in the Channel object creation itself.
7765         #[test]
7766         fn test_open_channel_msg_fee() {
7767                 let original_fee = 253;
7768                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7769                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7770                 let secp_ctx = Secp256k1::new();
7771                 let seed = [42; 32];
7772                 let network = Network::Testnet;
7773                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7774
7775                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7776                 let config = UserConfig::default();
7777                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7778
7779                 // Now change the fee so we can check that the fee in the open_channel message is the
7780                 // same as the old fee.
7781                 fee_est.fee_est = 500;
7782                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7783                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7784         }
7785
	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		// Override A's own dust limit so the two sides disagree (A: 1560, B: 546 sats).
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});

		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		// All HTLCs are dust from A's perspective, so the fee matches a 0-HTLC commitment.
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}
7870
	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		// NOTE: 253 sat/kw appears both as the estimator feerate and in the dust
		// threshold arithmetic below — the two must stay in sync.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Reference fees for commitments with zero and one non-dust HTLC.
		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// Flip perspective: now evaluate against the counterparty's commitment,
		// which uses the counterparty's dust limit.
		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
7917
	#[test]
	fn channel_reestablish_no_updates() {
		// After a disconnect with no updates in flight, each side's channel_reestablish
		// must report next_local_commitment_number == 1, next_remote_commitment_number == 0,
		// and an all-zero last per-commitment secret.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = best_block.block_hash();
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
7973
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		// Verify that `max_inbound_htlc_value_in_flight_percent_of_channel` is honored
		// for both outbound and inbound channel creation, and that out-of-range values
		// are clamped to [1%, 100%].
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
8046
8047         #[test]
8048         fn test_configured_holder_selected_channel_reserve_satoshis() {
8049
8050                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8051                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8052                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8053
8054                 // Test with valid but unreasonably high channel reserves
8055                 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8056                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8057                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8058
8059                 // Test with calculated channel reserve less than lower bound
8060                 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8061                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8062
8063                 // Test with invalid channel reserves since sum of both is greater than or equal
8064                 // to channel value
8065                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8066                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8067         }
8068
	// Helper for `test_configured_holder_selected_channel_reserve_satoshis`: opens a
	// channel of `channel_value_satoshis` where each side requests the given fraction
	// of the channel value as the other side's reserve, then checks the resulting
	// reserve amounts — or, when the two reserves sum to >= the channel value,
	// checks that the inbound handshake fails.
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());


		// The config stores the reserve as parts-per-million of the channel value.
		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();

		// The computed reserve is floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS.
		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			// Combined reserves leave room in the channel: the handshake must succeed
			// and both sides' reserve figures must match expectations.
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel Negotiations failed
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
8104
8105         #[test]
8106         fn channel_update() {
8107                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8108                 let logger = test_utils::TestLogger::new();
8109                 let secp_ctx = Secp256k1::new();
8110                 let seed = [42; 32];
8111                 let network = Network::Testnet;
8112                 let best_block = BestBlock::from_network(network);
8113                 let chain_hash = genesis_block(network).header.block_hash();
8114                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8115
8116                 // Create Node A's channel pointing to Node B's pubkey
8117                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8118                 let config = UserConfig::default();
8119                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8120
8121                 // Create Node B's channel by receiving Node A's open_channel message
8122                 // Make sure A's dust limit is as we expect.
8123                 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8124                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8125                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8126
8127                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8128                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8129                 accept_channel_msg.dust_limit_satoshis = 546;
8130                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8131                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8132
8133                 // Node A --> Node B: funding created
8134                 let output_script = node_a_chan.context.get_funding_redeemscript();
8135                 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8136                         value: 10000000, script_pubkey: output_script.clone(),
8137                 }]};
8138                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8139                 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8140                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8141
8142                 // Node B --> Node A: funding signed
8143                 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8144
8145                 // Make sure that receiving a channel update will update the Channel as expected.
8146                 let update = ChannelUpdate {
8147                         contents: UnsignedChannelUpdate {
8148                                 chain_hash,
8149                                 short_channel_id: 0,
8150                                 timestamp: 0,
8151                                 flags: 0,
8152                                 cltv_expiry_delta: 100,
8153                                 htlc_minimum_msat: 5,
8154                                 htlc_maximum_msat: MAX_VALUE_MSAT,
8155                                 fee_base_msat: 110,
8156                                 fee_proportional_millionths: 11,
8157                                 excess_data: Vec::new(),
8158                         },
8159                         signature: Signature::from(unsafe { FFISignature::new() })
8160                 };
8161                 assert!(node_a_chan.channel_update(&update).unwrap());
8162
8163                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8164                 // change our official htlc_minimum_msat.
8165                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8166                 match node_a_chan.context.counterparty_forwarding_info() {
8167                         Some(info) => {
8168                                 assert_eq!(info.cltv_expiry_delta, 100);
8169                                 assert_eq!(info.fee_base_msat, 110);
8170                                 assert_eq!(info.fee_proportional_millionths, 11);
8171                         },
8172                         None => panic!("expected counterparty forwarding info to be Some")
8173                 }
8174
8175                 assert!(!node_a_chan.channel_update(&update).unwrap());
8176         }
8177
8178         #[cfg(feature = "_test_vectors")]
8179         #[test]
8180         fn outbound_commitment_test() {
8181                 use bitcoin::util::sighash;
8182                 use bitcoin::consensus::encode::serialize;
8183                 use bitcoin::blockdata::transaction::EcdsaSighashType;
8184                 use bitcoin::hashes::hex::FromHex;
8185                 use bitcoin::hash_types::Txid;
8186                 use bitcoin::secp256k1::Message;
8187                 use crate::events::bump_transaction::{ChannelDerivationParameters, HTLCDescriptor};
8188                 use crate::sign::EcdsaChannelSigner;
8189                 use crate::ln::PaymentPreimage;
8190                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8191                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8192                 use crate::util::logger::Logger;
8193                 use crate::sync::Arc;
8194
8195                 // Test vectors from BOLT 3 Appendices C and F (anchors):
8196                 let feeest = TestFeeEstimator{fee_est: 15000};
8197                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8198                 let secp_ctx = Secp256k1::new();
8199
8200                 let mut signer = InMemorySigner::new(
8201                         &secp_ctx,
8202                         SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8203                         SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8204                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8205                         SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8206                         SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8207
8208                         // These aren't set in the test vectors:
8209                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8210                         10_000_000,
8211                         [0; 32],
8212                         [0; 32],
8213                 );
8214
8215                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8216                                 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8217                 let keys_provider = Keys { signer: signer.clone() };
8218
8219                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8220                 let mut config = UserConfig::default();
8221                 config.channel_handshake_config.announced_channel = false;
8222                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8223                 chan.context.holder_dust_limit_satoshis = 546;
8224                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8225
8226                 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8227
8228                 let counterparty_pubkeys = ChannelPublicKeys {
8229                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8230                         revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8231                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8232                         delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8233                         htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8234                 };
8235                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8236                         CounterpartyChannelTransactionParameters {
8237                                 pubkeys: counterparty_pubkeys.clone(),
8238                                 selected_contest_delay: 144
8239                         });
8240                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8241                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8242
8243                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8244                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8245
8246                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8247                            hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8248
8249                 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8250                            hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8251
8252                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8253                 // derived from a commitment_seed, so instead we copy it here and call
8254                 // build_commitment_transaction.
8255                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8256                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8257                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8258                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8259                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8260
8261                 macro_rules! test_commitment {
8262                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8263                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8264                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8265                         };
8266                 }
8267
8268                 macro_rules! test_commitment_with_anchors {
8269                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8270                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8271                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8272                         };
8273                 }
8274
8275                 macro_rules! test_commitment_common {
8276                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8277                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8278                         } ) => { {
8279                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8280                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8281
8282                                         let htlcs = commitment_stats.htlcs_included.drain(..)
8283                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8284                                                 .collect();
8285                                         (commitment_stats.tx, htlcs)
8286                                 };
8287                                 let trusted_tx = commitment_tx.trust();
8288                                 let unsigned_tx = trusted_tx.built_transaction();
8289                                 let redeemscript = chan.context.get_funding_redeemscript();
8290                                 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8291                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8292                                 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8293                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8294
8295                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8296                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8297                                 let mut counterparty_htlc_sigs = Vec::new();
8298                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8299                                 $({
8300                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8301                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8302                                         counterparty_htlc_sigs.push(remote_signature);
8303                                 })*
8304                                 assert_eq!(htlcs.len(), per_htlc.len());
8305
8306                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
8307                                         commitment_tx.clone(),
8308                                         counterparty_signature,
8309                                         counterparty_htlc_sigs,
8310                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8311                                         chan.context.counterparty_funding_pubkey()
8312                                 );
8313                                 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8314                                 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8315
8316                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
8317                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8318                                 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8319
8320                                 // ((htlc, counterparty_sig), (index, holder_sig))
8321                                 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8322
8323                                 $({
8324                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
8325                                         let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8326
8327                                         let ref htlc = htlcs[$htlc_idx];
8328                                         let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8329                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8330                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8331                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8332                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8333                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8334                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8335
8336                                         let mut preimage: Option<PaymentPreimage> = None;
8337                                         if !htlc.offered {
8338                                                 for i in 0..5 {
8339                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8340                                                         if out == htlc.payment_hash {
8341                                                                 preimage = Some(PaymentPreimage([i; 32]));
8342                                                         }
8343                                                 }
8344
8345                                                 assert!(preimage.is_some());
8346                                         }
8347
8348                                         let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8349                                         let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8350                                                 channel_derivation_parameters: ChannelDerivationParameters {
8351                                                         value_satoshis: chan.context.channel_value_satoshis,
8352                                                         keys_id: chan.context.channel_keys_id,
8353                                                         transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8354                                                 },
8355                                                 commitment_txid: trusted_tx.txid(),
8356                                                 per_commitment_number: trusted_tx.commitment_number(),
8357                                                 per_commitment_point: trusted_tx.per_commitment_point(),
8358                                                 feerate_per_kw: trusted_tx.feerate_per_kw(),
8359                                                 htlc: htlc.clone(),
8360                                                 preimage: preimage.clone(),
8361                                                 counterparty_sig: *htlc_counterparty_sig,
8362                                         }, &secp_ctx).unwrap();
8363                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8364                                         assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8365
8366                                         let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8367                                         assert_eq!(signature, htlc_holder_sig, "htlc sig");
8368                                         let trusted_tx = holder_commitment_tx.trust();
8369                                         htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8370                                         log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8371                                         assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8372                                 })*
8373                                 assert!(htlc_counterparty_sig_iter.next().is_none());
8374                         } }
8375                 }
8376
8377                 // anchors: simple commitment tx with no HTLCs and single anchor
8378                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8379                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8380                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8381
8382                 // simple commitment tx with no HTLCs
8383                 chan.context.value_to_self_msat = 7000000000;
8384
8385                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8386                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8387                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8388
8389                 // anchors: simple commitment tx with no HTLCs
8390                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8391                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8392                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8393
8394                 chan.context.pending_inbound_htlcs.push({
8395                         let mut out = InboundHTLCOutput{
8396                                 htlc_id: 0,
8397                                 amount_msat: 1000000,
8398                                 cltv_expiry: 500,
8399                                 payment_hash: PaymentHash([0; 32]),
8400                                 state: InboundHTLCState::Committed,
8401                         };
8402                         out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8403                         out
8404                 });
8405                 chan.context.pending_inbound_htlcs.push({
8406                         let mut out = InboundHTLCOutput{
8407                                 htlc_id: 1,
8408                                 amount_msat: 2000000,
8409                                 cltv_expiry: 501,
8410                                 payment_hash: PaymentHash([0; 32]),
8411                                 state: InboundHTLCState::Committed,
8412                         };
8413                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8414                         out
8415                 });
8416                 chan.context.pending_outbound_htlcs.push({
8417                         let mut out = OutboundHTLCOutput{
8418                                 htlc_id: 2,
8419                                 amount_msat: 2000000,
8420                                 cltv_expiry: 502,
8421                                 payment_hash: PaymentHash([0; 32]),
8422                                 state: OutboundHTLCState::Committed,
8423                                 source: HTLCSource::dummy(),
8424                                 skimmed_fee_msat: None,
8425                         };
8426                         out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8427                         out
8428                 });
8429                 chan.context.pending_outbound_htlcs.push({
8430                         let mut out = OutboundHTLCOutput{
8431                                 htlc_id: 3,
8432                                 amount_msat: 3000000,
8433                                 cltv_expiry: 503,
8434                                 payment_hash: PaymentHash([0; 32]),
8435                                 state: OutboundHTLCState::Committed,
8436                                 source: HTLCSource::dummy(),
8437                                 skimmed_fee_msat: None,
8438                         };
8439                         out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8440                         out
8441                 });
8442                 chan.context.pending_inbound_htlcs.push({
8443                         let mut out = InboundHTLCOutput{
8444                                 htlc_id: 4,
8445                                 amount_msat: 4000000,
8446                                 cltv_expiry: 504,
8447                                 payment_hash: PaymentHash([0; 32]),
8448                                 state: InboundHTLCState::Committed,
8449                         };
8450                         out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8451                         out
8452                 });
8453
8454                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8455                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8456                 chan.context.feerate_per_kw = 0;
8457
8458                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8459                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8460                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8461
8462                                   { 0,
8463                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8464                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8465                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8466
8467                                   { 1,
8468                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8469                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8470                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8471
8472                                   { 2,
8473                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8474                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8475                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8476
8477                                   { 3,
8478                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8479                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8480                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8481
8482                                   { 4,
8483                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8484                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8485                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8486                 } );
8487
8488                 // commitment tx with seven outputs untrimmed (maximum feerate)
                     // NOTE(review): scenario name matches the BOLT 3 "Appendix C" commitment and
                     // HTLC transaction test vectors — confirm against the spec text. The three
                     // leading hex strings appear to be (remote sig, local sig, fully signed
                     // commitment tx), and each `{ idx, sig, sig, tx }` tuple below covers one
                     // HTLC-success/timeout transaction; exact argument semantics are defined by
                     // the test_commitment! macro, which is outside this view — verify there.
8489                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8490                 chan.context.feerate_per_kw = 647;
8491
8492                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8493                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8494                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8495
8496                                   { 0,
8497                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8498                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8499                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8500
8501                                   { 1,
8502                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8503                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8504                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8505
8506                                   { 2,
8507                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8508                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8509                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8510
8511                                   { 3,
8512                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8513                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8514                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8515
8516                                   { 4,
8517                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8518                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8519                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8520                 } );
8521
8522                 // commitment tx with six outputs untrimmed (minimum feerate)
                     // NOTE(review): at feerate 648 (vs. 647 in the previous scenario) the
                     // commitment goes from seven visible outputs to six — presumably one HTLC
                     // output falls below the trim threshold at this feerate; confirm against the
                     // BOLT 3 Appendix C vector of the same name. Hex arguments follow the
                     // test_commitment! macro's convention (macro defined outside this view).
8523                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8524                 chan.context.feerate_per_kw = 648;
8525
8526                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8527                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8528                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8529
8530                                   { 0,
8531                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8532                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8533                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8534
8535                                   { 1,
8536                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8537                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8538                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8539
8540                                   { 2,
8541                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8542                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8543                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8544
8545                                   { 3,
8546                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8547                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8548                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8549                 } );
8550
8551                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
                     // NOTE(review): anchor-outputs variant of the six-output case — uses
                     // test_commitment_with_anchors! rather than test_commitment!, and bumps
                     // holder_dust_limit_satoshis to 1001 for this scenario only (a later
                     // scenario in this file sets it back to 546). Presumably matches the
                     // corresponding anchors vector in BOLT 3 Appendix C — confirm against the
                     // spec; argument order is defined by the macro, outside this view.
8552                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8553                 chan.context.feerate_per_kw = 645;
8554                 chan.context.holder_dust_limit_satoshis = 1001;
8555
8556                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8557                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8558                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8559
8560                                   { 0,
8561                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8562                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8563                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8564
8565                                   { 1,
8566                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8567                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8568                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8569
8570                                   { 2,
8571                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8572                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8573                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8574
8575                                   { 3,
8576                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8577                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8578                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8579                 } );
8580
8581                 // commitment tx with six outputs untrimmed (maximum feerate)
                     // NOTE(review): highest feerate (2069 sat) at which six outputs survive in
                     // this vector set; holder_dust_limit_satoshis is restored to 546 here after
                     // the preceding anchors scenario raised it. Presumably matches the BOLT 3
                     // Appendix C vector of the same name — confirm against the spec. Hex argument
                     // order is defined by the test_commitment! macro, outside this view.
8582                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8583                 chan.context.feerate_per_kw = 2069;
8584                 chan.context.holder_dust_limit_satoshis = 546;
8585
8586                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8587                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8588                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8589
8590                                   { 0,
8591                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8592                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8593                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8594
8595                                   { 1,
8596                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8597                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8598                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8599
8600                                   { 2,
8601                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8602                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8603                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8604
8605                                   { 3,
8606                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8607                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8608                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8609                 } );
8610
8611                 // commitment tx with five outputs untrimmed (minimum feerate)
8612                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8613                 chan.context.feerate_per_kw = 2070;
8614
8615                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8616                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8617                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8618
8619                                   { 0,
8620                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8621                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8622                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8623
8624                                   { 1,
8625                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8626                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8627                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8628
8629                                   { 2,
8630                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8631                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8632                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8633                 } );
8634
8635                 // commitment tx with five outputs untrimmed (maximum feerate)
8636                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8637                 chan.context.feerate_per_kw = 2194;
8638
8639                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8640                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8641                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8642
8643                                   { 0,
8644                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8645                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8646                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8647
8648                                   { 1,
8649                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8650                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8651                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8652
8653                                   { 2,
8654                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8655                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8656                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8657                 } );
8658
8659                 // commitment tx with four outputs untrimmed (minimum feerate)
8660                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8661                 chan.context.feerate_per_kw = 2195;
8662
8663                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8664                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8665                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8666
8667                                   { 0,
8668                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8669                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8670                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8671
8672                                   { 1,
8673                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8674                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8675                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8676                 } );
8677
8678                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8679                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8680                 chan.context.feerate_per_kw = 2185;
8681                 chan.context.holder_dust_limit_satoshis = 2001;
8682                 let cached_channel_type = chan.context.channel_type;
8683                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8684
8685                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8686                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8687                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8688
8689                                   { 0,
8690                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8691                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8692                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8693
8694                                   { 1,
8695                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8696                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8697                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8698                 } );
8699
8700                 // commitment tx with four outputs untrimmed (maximum feerate)
8701                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8702                 chan.context.feerate_per_kw = 3702;
8703                 chan.context.holder_dust_limit_satoshis = 546;
8704                 chan.context.channel_type = cached_channel_type.clone();
8705
8706                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8707                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8708                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8709
8710                                   { 0,
8711                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8712                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8713                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8714
8715                                   { 1,
8716                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8717                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8718                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8719                 } );
8720
8721                 // commitment tx with three outputs untrimmed (minimum feerate)
8722                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8723                 chan.context.feerate_per_kw = 3703;
8724
8725                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8726                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8727                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8728
8729                                   { 0,
8730                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8731                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8732                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8733                 } );
8734
8735                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8736                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8737                 chan.context.feerate_per_kw = 3687;
8738                 chan.context.holder_dust_limit_satoshis = 3001;
8739                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8740
8741                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8742                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8743                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8744
8745                                   { 0,
8746                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8747                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8748                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8749                 } );
8750
8751                 // commitment tx with three outputs untrimmed (maximum feerate)
8752                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8753                 chan.context.feerate_per_kw = 4914;
8754                 chan.context.holder_dust_limit_satoshis = 546;
8755                 chan.context.channel_type = cached_channel_type.clone();
8756
8757                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8758                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8759                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8760
8761                                   { 0,
8762                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8763                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8764                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8765                 } );
8766
8767                 // commitment tx with two outputs untrimmed (minimum feerate)
8768                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8769                 chan.context.feerate_per_kw = 4915;
8770                 chan.context.holder_dust_limit_satoshis = 546;
8771
8772                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8773                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8774                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8775
8776                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8777                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8778                 chan.context.feerate_per_kw = 4894;
8779                 chan.context.holder_dust_limit_satoshis = 4001;
8780                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8781
8782                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8783                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8784                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8785
8786                 // commitment tx with two outputs untrimmed (maximum feerate)
8787                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8788                 chan.context.feerate_per_kw = 9651180;
8789                 chan.context.holder_dust_limit_satoshis = 546;
8790                 chan.context.channel_type = cached_channel_type.clone();
8791
8792                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8793                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8794                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8795
8796                 // commitment tx with one output untrimmed (minimum feerate)
8797                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8798                 chan.context.feerate_per_kw = 9651181;
8799
8800                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8801                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8802                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8803
8804                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8805                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8806                 chan.context.feerate_per_kw = 6216010;
8807                 chan.context.holder_dust_limit_satoshis = 4001;
8808                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8809
8810                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8811                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8812                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8813
8814                 // commitment tx with fee greater than funder amount
8815                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8816                 chan.context.feerate_per_kw = 9651936;
8817                 chan.context.holder_dust_limit_satoshis = 546;
8818                 chan.context.channel_type = cached_channel_type;
8819
8820                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8821                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8822                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8823
8824                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8825                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8826                 chan.context.feerate_per_kw = 253;
8827                 chan.context.pending_inbound_htlcs.clear();
8828                 chan.context.pending_inbound_htlcs.push({
8829                         let mut out = InboundHTLCOutput{
8830                                 htlc_id: 1,
8831                                 amount_msat: 2000000,
8832                                 cltv_expiry: 501,
8833                                 payment_hash: PaymentHash([0; 32]),
8834                                 state: InboundHTLCState::Committed,
8835                         };
8836                         out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8837                         out
8838                 });
8839                 chan.context.pending_outbound_htlcs.clear();
8840                 chan.context.pending_outbound_htlcs.push({
8841                         let mut out = OutboundHTLCOutput{
8842                                 htlc_id: 6,
8843                                 amount_msat: 5000001,
8844                                 cltv_expiry: 506,
8845                                 payment_hash: PaymentHash([0; 32]),
8846                                 state: OutboundHTLCState::Committed,
8847                                 source: HTLCSource::dummy(),
8848                                 skimmed_fee_msat: None,
8849                         };
8850                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8851                         out
8852                 });
8853                 chan.context.pending_outbound_htlcs.push({
8854                         let mut out = OutboundHTLCOutput{
8855                                 htlc_id: 5,
8856                                 amount_msat: 5000000,
8857                                 cltv_expiry: 505,
8858                                 payment_hash: PaymentHash([0; 32]),
8859                                 state: OutboundHTLCState::Committed,
8860                                 source: HTLCSource::dummy(),
8861                                 skimmed_fee_msat: None,
8862                         };
8863                         out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8864                         out
8865                 });
8866
8867                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8868                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8869                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8870
8871                                   { 0,
8872                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8873                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8874                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8875                                   { 1,
8876                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8877                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8878                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8879                                   { 2,
8880                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8881                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8882                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8883                 } );
8884
8885                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8886                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8887                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8888                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8889
8890                                   { 0,
8891                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8892                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8893                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8894                                   { 1,
8895                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8896                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8897                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8898                                   { 2,
8899                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8900                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8901                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8902                 } );
8903         }
8904
8905         #[test]
8906         fn test_per_commitment_secret_gen() {
8907                 // Test vectors from BOLT 3 Appendix D:
8908
8909                 let mut seed = [0; 32];
8910                 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8911                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8912                            hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
8913
8914                 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8915                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8916                            hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
8917
8918                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8919                            hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8920
8921                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8922                            hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
8923
8924                 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8925                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8926                            hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8927         }
8928
8929         #[test]
8930         fn test_key_derivation() {
8931                 // Test vectors from BOLT 3 Appendix E:
8932                 let secp_ctx = Secp256k1::new();
8933
8934                 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8935                 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8936
8937                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8938                 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8939
8940                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8941                 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
8942
8943                 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8944                                 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
8945
8946                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8947                                 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
8948
8949                 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8950                                 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
8951
8952                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8953                                 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8954         }
8955
8956         #[test]
8957         fn test_zero_conf_channel_type_support() {
8958                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8959                 let secp_ctx = Secp256k1::new();
8960                 let seed = [42; 32];
8961                 let network = Network::Testnet;
8962                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8963                 let logger = test_utils::TestLogger::new();
8964
8965                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8966                 let config = UserConfig::default();
8967                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
8968                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8969
8970                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8971                 channel_type_features.set_zero_conf_required();
8972
8973                 let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8974                 open_channel_msg.channel_type = Some(channel_type_features);
8975                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8976                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
8977                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
8978                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
8979                 assert!(res.is_ok());
8980         }
8981
8982         #[test]
8983         fn test_supports_anchors_zero_htlc_tx_fee() {
8984                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
8985                 // resulting `channel_type`.
8986                 let secp_ctx = Secp256k1::new();
8987                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8988                 let network = Network::Testnet;
8989                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8990                 let logger = test_utils::TestLogger::new();
8991
8992                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8993                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8994
8995                 let mut config = UserConfig::default();
8996                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
8997
8998                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
8999                 // need to signal it.
9000                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9001                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9002                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9003                         &config, 0, 42
9004                 ).unwrap();
9005                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9006
9007                 let mut expected_channel_type = ChannelTypeFeatures::empty();
9008                 expected_channel_type.set_static_remote_key_required();
9009                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9010
9011                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9012                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9013                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9014                 ).unwrap();
9015
9016                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9017                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9018                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9019                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9020                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9021                 ).unwrap();
9022
9023                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9024                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9025         }
9026
9027         #[test]
9028         fn test_rejects_implicit_simple_anchors() {
9029                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9030                 // each side's `InitFeatures`, it is rejected.
9031                 let secp_ctx = Secp256k1::new();
9032                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9033                 let network = Network::Testnet;
9034                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9035                 let logger = test_utils::TestLogger::new();
9036
9037                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9038                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9039
9040                 let config = UserConfig::default();
9041
9042                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9043                 let static_remote_key_required: u64 = 1 << 12;
9044                 let simple_anchors_required: u64 = 1 << 20;
9045                 let raw_init_features = static_remote_key_required | simple_anchors_required;
9046                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9047
9048                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9049                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9050                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9051                 ).unwrap();
9052
9053                 // Set `channel_type` to `None` to force the implicit feature negotiation.
9054                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9055                 open_channel_msg.channel_type = None;
9056
9057                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9058                 // `static_remote_key`, it will fail the channel.
9059                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9060                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9061                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9062                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9063                 );
9064                 assert!(channel_b.is_err());
9065         }
9066
9067         #[test]
9068         fn test_rejects_simple_anchors_channel_type() {
9069                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9070                 // it is rejected.
9071                 let secp_ctx = Secp256k1::new();
9072                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9073                 let network = Network::Testnet;
9074                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9075                 let logger = test_utils::TestLogger::new();
9076
9077                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9078                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9079
9080                 let config = UserConfig::default();
9081
9082                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9083                 let static_remote_key_required: u64 = 1 << 12;
9084                 let simple_anchors_required: u64 = 1 << 20;
9085                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9086                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9087                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9088                 assert!(!simple_anchors_init.requires_unknown_bits());
9089                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9090
9091                 // First, we'll try to open a channel between A and B where A requests a channel type for
9092                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9093                 // B as it's not supported by LDK.
9094                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9095                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9096                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9097                 ).unwrap();
9098
9099                 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9100                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9101
9102                 let res = InboundV1Channel::<&TestKeysInterface>::new(
9103                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9104                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9105                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9106                 );
9107                 assert!(res.is_err());
9108
9109                 // Then, we'll try to open another channel where A requests a channel type for
9110                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9111                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9112                 // LDK.
9113                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9114                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9115                         10000000, 100000, 42, &config, 0, 42
9116                 ).unwrap();
9117
9118                 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
9119
9120                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9121                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9122                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9123                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9124                 ).unwrap();
9125
9126                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9127                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9128
9129                 let res = channel_a.accept_channel(
9130                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9131                 );
9132                 assert!(res.is_err());
9133         }
9134
	#[test]
	fn test_waiting_for_batch() {
		// Tests that a channel funded as part of a batch funding transaction holds back its own
		// `channel_ready` and the funding broadcast until `set_batch_ready` is called, even when
		// `trust_own_funding_0conf` would otherwise allow sending `channel_ready` immediately.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = genesis_block(network).header.block_hash();
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
		).unwrap();

		// Drive the open_channel/accept_channel handshake between the two nodes.
		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction. The second output stands in for
		// another channel sharing the same funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: PackedLockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(),
			funding_outpoint,
			true,  // NOTE(review): presumably marks this channel as batch-funded — confirm against signature.
			&&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg,
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		// Simulate node b's monitor persistence completing so we can pull out its pending
		// updates, including the 0conf channel_ready allowed by the flag above.
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg,
			best_block,
			&&keys_provider,
			&&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);

		// It is possible to receive a 0conf channel_ready from the remote node even while we are
		// still waiting for the batch; it only adds the TheirChannelReady flag.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		// With WaitingForBatch cleared, the channel is finally willing to produce its own
		// channel_ready.
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
9276 }