1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script,Builder};
12 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
13 use bitcoin::util::sighash;
14 use bitcoin::consensus::encode;
16 use bitcoin::hashes::Hash;
17 use bitcoin::hashes::sha256::Hash as Sha256;
18 use bitcoin::hashes::sha256d::Hash as Sha256d;
19 use bitcoin::hash_types::{Txid, BlockHash};
21 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
22 use bitcoin::secp256k1::{PublicKey,SecretKey};
23 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
24 use bitcoin::secp256k1;
26 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
27 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
29 use crate::ln::msgs::DecodeError;
30 use crate::ln::script::{self, ShutdownScript};
31 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
32 use crate::ln::chan_utils::{
33 CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight,
34 htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction,
35 HolderCommitmentTransaction, ChannelTransactionParameters,
36 CounterpartyChannelTransactionParameters, MAX_HTLCS, commit_tx_fee_sat,
37 get_commitment_transaction_number_obscure_factor, ClosingTransaction
39 use crate::ln::chan_utils;
40 use crate::ln::onion_utils::HTLCFailReason;
41 use crate::chain::BestBlock;
42 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
43 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
44 use crate::chain::transaction::{OutPoint, TransactionData};
45 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
46 use crate::events::ClosureReason;
47 use crate::routing::gossip::NodeId;
48 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
49 use crate::util::logger::Logger;
50 use crate::util::errors::APIError;
51 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
52 use crate::util::scid_utils::scid_from_parts;
55 use crate::prelude::*;
56 use core::{cmp,mem,fmt};
57 use core::convert::TryInto;
59 #[cfg(any(test, fuzzing, debug_assertions))]
60 use crate::sync::Mutex;
61 use bitcoin::hashes::hex::ToHex;
62 use crate::sign::type_resolver::ChannelSignerType;
/// Balance-related channel statistics. All amounts are in millisatoshis, as the
/// `_msat` suffixes indicate.
65 pub struct ChannelValueStat {
66 pub value_to_self_msat: u64,
67 pub channel_value_msat: u64,
68 pub channel_reserve_msat: u64,
69 pub pending_outbound_htlcs_amount_msat: u64,
70 pub pending_inbound_htlcs_amount_msat: u64,
71 pub holding_cell_outbound_amount_msat: u64,
72 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
73 pub counterparty_dust_limit_msat: u64,
/// A collection of channel balances and HTLC limits, in millisatoshis.
76 pub struct AvailableBalances {
77 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
78 pub balance_msat: u64,
79 /// Total amount available for our counterparty to send to us.
80 pub inbound_capacity_msat: u64,
81 /// Total amount available for us to send to our counterparty.
82 pub outbound_capacity_msat: u64,
83 /// The maximum value we can assign to the next outbound HTLC
84 pub next_outbound_htlc_limit_msat: u64,
85 /// The minimum value we can assign to the next outbound HTLC
86 pub next_outbound_htlc_minimum_msat: u64,
89 #[derive(Debug, Clone, Copy, PartialEq)]
// States a fee update passes through; the comments below note that these
// deliberately mirror the HTLC state names. NOTE(review): the enum's declaration
// line and some variants are not visible in this excerpt — confirm against the
// full file before relying on the variant set.
91 // Inbound states mirroring InboundHTLCState
93 AwaitingRemoteRevokeToAnnounce,
94 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
95 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
96 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
97 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
98 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
100 // Outbound state can only be `LocalAnnounced` or `Committed`
/// The reason an inbound HTLC is being removed from the channel state.
104 enum InboundHTLCRemovalReason {
/// We are failing the HTLC backwards with an onion error packet.
105 FailRelay(msgs::OnionErrorPacket),
/// We are failing the HTLC backwards as malformed — presumably the 32-byte
/// sha256-of-onion and a u16 failure code; confirm against the message handlers.
106 FailMalformed(([u8; 32], u16)),
/// We are fulfilling the HTLC with its payment preimage.
107 Fulfill(PaymentPreimage),
/// The lifecycle of an inbound HTLC, from the remote's `update_add_htlc` until removal.
110 enum InboundHTLCState {
111 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
112 /// update_add_htlc message for this HTLC.
113 RemoteAnnounced(PendingHTLCStatus),
114 /// Included in a received commitment_signed message (implying we've
115 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
116 /// state (see the example below). We have not yet included this HTLC in a
117 /// commitment_signed message because we are waiting on the remote's
118 /// aforementioned state revocation. One reason this missing remote RAA
119 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
120 /// is because every time we create a new "state", i.e. every time we sign a
121 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
122 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
123 /// sent provided the per_commitment_point for our current commitment tx.
124 /// The other reason we should not send a commitment_signed without their RAA
125 /// is because their RAA serves to ACK our previous commitment_signed.
127 /// Here's an example of how an HTLC could come to be in this state:
128 /// remote --> update_add_htlc(prev_htlc) --> local
129 /// remote --> commitment_signed(prev_htlc) --> local
130 /// remote <-- revoke_and_ack <-- local
131 /// remote <-- commitment_signed(prev_htlc) <-- local
132 /// [note that here, the remote does not respond with a RAA]
133 /// remote --> update_add_htlc(this_htlc) --> local
134 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
135 /// Now `this_htlc` will be assigned this state. It's unable to be officially
136 /// accepted, i.e. included in a commitment_signed, because we're missing the
137 /// RAA that provides our next per_commitment_point. The per_commitment_point
138 /// is used to derive commitment keys, which are used to construct the
139 /// signatures in a commitment_signed message.
140 /// Implies AwaitingRemoteRevoke.
142 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
143 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
144 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
145 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
146 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
147 /// channel (before it can then get forwarded and/or removed).
148 /// Implies AwaitingRemoteRevoke.
149 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
151 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
152 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
154 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
155 /// commitment transaction without it as otherwise we'll have to force-close the channel to
156 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
157 /// anyway). That said, ChannelMonitor does this for us (see
158 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
159 /// our own local state before then, once we're sure that the next commitment_signed and
160 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
161 LocalRemoved(InboundHTLCRemovalReason),
/// Tracks a single inbound HTLC and where it is in its lifecycle.
164 struct InboundHTLCOutput {
// The payment hash this HTLC is locked to.
168 payment_hash: PaymentHash,
// Where this HTLC currently sits in the inbound state machine (see `InboundHTLCState`).
169 state: InboundHTLCState,
/// The lifecycle of an outbound HTLC we have offered to our counterparty.
172 enum OutboundHTLCState {
173 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
174 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
175 /// we will promote to Committed (note that they may not accept it until the next time we
176 /// revoke, but we don't really care about that:
177 /// * they've revoked, so worst case we can announce an old state and get our (option on)
178 /// money back (though we won't), and,
179 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
180 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
181 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
182 /// we'll never get out of sync).
183 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
184 /// OutboundHTLCOutput's size just for a temporary bit
185 LocalAnnounced(Box<msgs::OnionPacket>),
187 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
188 /// the change (though they'll need to revoke before we fail the payment).
189 RemoteRemoved(OutboundHTLCOutcome),
190 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
191 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
192 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
193 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
194 /// remote revoke_and_ack on a previous state before we can do so.
195 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
196 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
197 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
198 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
199 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
200 /// revoke_and_ack to drop completely.
201 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
/// The result of a removed outbound HTLC: either the payee claimed it (success)
/// or it was failed back to us.
205 enum OutboundHTLCOutcome {
206 /// LDK version 0.0.105+ will always fill in the preimage here.
207 Success(Option<PaymentPreimage>),
/// The HTLC failed, carrying the reason to propagate backwards.
208 Failure(HTLCFailReason),
// Convenience conversion: `None` (no failure) maps to `Success(None)` and
// `Some(reason)` maps to `Failure(reason)`.
211 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
212 fn from(o: Option<HTLCFailReason>) -> Self {
214 None => OutboundHTLCOutcome::Success(None),
215 Some(r) => OutboundHTLCOutcome::Failure(r)
// Borrows the failure reason, if any. NOTE(review): `Into` is implemented
// directly rather than the usual `From` — presumably because the orphan rules
// disallow `impl From<&OutboundHTLCOutcome> for Option<&HTLCFailReason>`; confirm
// before "fixing" this to `From`.
220 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
221 fn into(self) -> Option<&'a HTLCFailReason> {
223 OutboundHTLCOutcome::Success(_) => None,
224 OutboundHTLCOutcome::Failure(ref r) => Some(r)
/// Tracks a single outbound HTLC and where it is in its lifecycle.
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
// Where this HTLC currently sits in the outbound state machine (see `OutboundHTLCState`).
234 state: OutboundHTLCState,
// The extra fee skimmed off the top of this HTLC, if any — same meaning as the
// identically-named field on `HTLCUpdateAwaitingACK::AddHTLC`.
236 skimmed_fee_msat: Option<u64>,
239 /// See AwaitingRemoteRevoke ChannelState for more info
/// An HTLC update held back (in the holding cell) while we await the remote's
/// revoke_and_ack, to be replayed once we are free to send a new commitment.
240 enum HTLCUpdateAwaitingACK {
241 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
245 payment_hash: PaymentHash,
247 onion_routing_packet: msgs::OnionPacket,
248 // The extra fee we're skimming off the top of this HTLC.
249 skimmed_fee_msat: Option<u64>,
// Fields below belong to the claim/fail variants (variant declarations are not
// visible in this excerpt — confirm against the full file).
252 payment_preimage: PaymentPreimage,
257 err_packet: msgs::OnionErrorPacket,
261 /// There are a few "states" and then a number of flags which can be applied:
262 /// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
263 /// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
264 /// move on to `ChannelReady`.
265 /// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
266 /// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
267 /// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
// Values form a bitmask: base states plus `1 << n` flags which can be OR'd on top.
269 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
270 OurInitSent = 1 << 0,
271 /// Implies we have received their `open_channel`/`accept_channel` message
272 TheirInitSent = 1 << 1,
273 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
274 /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
275 /// upon receipt of `funding_created`, so simply skip this state.
277 /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
278 /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
279 /// and our counterparty consider the funding transaction confirmed.
281 /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
282 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
283 TheirChannelReady = 1 << 4,
284 /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
285 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
286 OurChannelReady = 1 << 5,
288 /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
289 /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
291 PeerDisconnected = 1 << 7,
292 /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
293 /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
294 /// sending any outbound messages until they've managed to finish.
295 MonitorUpdateInProgress = 1 << 8,
296 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
297 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
298 /// messages as then we will be unable to determine which HTLCs they included in their
299 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
301 /// Flag is set on `ChannelReady`.
302 AwaitingRemoteRevoke = 1 << 9,
303 /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
304 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
305 /// to respond with our own shutdown message when possible.
306 RemoteShutdownSent = 1 << 10,
307 /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
308 /// point, we may not add any new HTLCs to the channel.
309 LocalShutdownSent = 1 << 11,
310 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
311 /// to drop us, but we store this anyway.
// NOTE(review): 4096 == 1 << 12, written as a decimal literal unlike every other
// variant here — consider normalizing to `1 << 12` for consistency.
312 ShutdownComplete = 4096,
313 /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
314 /// broadcasting of the funding transaction is being held until all channels in the batch
315 /// have received funding_signed and have their monitors persisted.
316 WaitingForBatch = 1 << 13,
// Mask covering both sides' shutdown-sent flags.
318 const BOTH_SIDES_SHUTDOWN_MASK: u32 =
319 ChannelState::LocalShutdownSent as u32 |
320 ChannelState::RemoteShutdownSent as u32;
// Mask of flags which may be set on top of multiple different base states.
321 const MULTI_STATE_FLAGS: u32 =
322 BOTH_SIDES_SHUTDOWN_MASK |
323 ChannelState::PeerDisconnected as u32 |
324 ChannelState::MonitorUpdateInProgress as u32;
// Mask of the flag bits, as opposed to the base state values.
325 const STATE_FLAGS: u32 =
327 ChannelState::TheirChannelReady as u32 |
328 ChannelState::OurChannelReady as u32 |
329 ChannelState::AwaitingRemoteRevoke as u32 |
330 ChannelState::WaitingForBatch as u32;
/// Commitment numbers count down from 2^48 - 1; the expression makes the 48-bit
/// width explicit.
332 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
334 pub const DEFAULT_MAX_HTLCS: u16 = 50;
/// The value of each anchor output, in satoshis.
336 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
338 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
339 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
340 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
341 /// `holder_max_htlc_value_in_flight_msat`.
342 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
344 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
345 /// `option_support_large_channel` (aka wumbo channels) is not supported.
347 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
349 /// Total bitcoin supply in satoshis.
350 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
352 /// The maximum network dust limit for standard script formats. This currently represents the
353 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
354 /// transaction non-standard and thus refuses to relay it.
355 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
356 /// implementations use this value for their dust limit today.
357 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
359 /// The maximum channel dust limit we will accept from our counterparty.
360 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
362 /// The dust limit is used for both the commitment transaction outputs as well as the closing
363 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
364 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
365 /// In order to avoid having to concern ourselves with standardness during the closing process, we
366 /// simply require our counterparty to use a dust limit which will leave any segwit output
368 /// See <https://github.com/lightning/bolts/issues/905> for more details.
369 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
371 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
372 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
374 /// Used to return a simple Error back to ChannelManager. Will get converted to a
375 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
376 /// channel_id in ChannelManager.
// Variants carry a human-readable message string; see the Debug/Display impls below.
377 pub(super) enum ChannelError {
// Debug output includes the variant name ("Ignore"/"Warn"/"Close") alongside the message.
383 impl fmt::Debug for ChannelError {
384 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
386 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
387 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
388 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
// Display output renders only the inner message, with no variant-name prefix
// (contrast with the Debug impl above).
393 impl fmt::Display for ChannelError {
394 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
396 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
397 &ChannelError::Warn(ref e) => write!(f, "{}", e),
398 &ChannelError::Close(ref e) => write!(f, "{}", e),
// Checks a secp256k1 result, early-returning `ChannelError::Close($err)` from the
// enclosing function on `Err`.
403 macro_rules! secp_check {
404 ($res: expr, $err: expr) => {
407 Err(_) => return Err(ChannelError::Close($err)),
412 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
413 /// our counterparty or not. However, we don't want to announce updates right away to avoid
414 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
415 /// our channel_update message and track the current state here.
416 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
417 #[derive(Clone, Copy, PartialEq)]
// Tracked per-channel; transitions are driven by the timer tick referenced above.
418 pub(super) enum ChannelUpdateStatus {
419 /// We've announced the channel as enabled and are connected to our peer.
421 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
423 /// Our channel is live again, but we haven't announced the channel as enabled yet.
425 /// We've announced the channel as disabled.
429 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
// The variants describe a progression from "not sent" through "sent" to "acked";
// note that the intermediate states are deliberately not persisted (see below).
431 pub enum AnnouncementSigsState {
432 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
433 /// we sent the last `AnnouncementSignatures`.
435 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
436 /// This state never appears on disk - instead we write `NotSent`.
438 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
439 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
440 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
441 /// they send back a `RevokeAndACK`.
442 /// This state never appears on disk - instead we write `NotSent`.
444 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
445 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
449 /// An enum indicating whether the local or remote side offered a given HTLC.
455 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
458 pending_htlcs_value_msat: u64,
459 on_counterparty_tx_dust_exposure_msat: u64,
460 on_holder_tx_dust_exposure_msat: u64,
461 holding_cell_msat: u64,
462 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
465 /// A struct gathering stats on a commitment transaction, either local or remote.
// The `'a` lifetime ties the borrowed `HTLCSource`s in `htlcs_included` back to
// the channel state this was built from.
466 struct CommitmentStats<'a> {
467 tx: CommitmentTransaction, // the transaction info
468 feerate_per_kw: u32, // the feerate included to build the transaction
469 total_fee_sat: u64, // the total fee included in the transaction
470 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
471 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
472 local_balance_msat: u64, // local balance before fees but considering dust limits
473 remote_balance_msat: u64, // remote balance before fees but considering dust limits
474 preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
477 /// Used when calculating whether we or the remote can afford an additional HTLC.
478 struct HTLCCandidate {
// Which side would be adding this candidate HTLC (see `HTLCInitiator`).
480 origin: HTLCInitiator,
/// Constructs a candidate from its amount (in msat) and the initiating side.
484 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
492 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
494 enum UpdateFulfillFetch {
// The ChannelMonitorUpdate which places the payment preimage in the channel monitor.
496 monitor_update: ChannelMonitorUpdate,
// The value, in msat, of the HTLC being fulfilled.
497 htlc_value_msat: u64,
// The update_fulfill message to send to our counterparty, if any.
498 msg: Option<msgs::UpdateFulfillHTLC>,
503 /// The return type of get_update_fulfill_htlc_and_commit.
// Sibling of `UpdateFulfillFetch` above; distinguishes a new claim from a
// duplicate one (variant identifiers are not visible in this excerpt).
504 pub enum UpdateFulfillCommitFetch {
505 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
506 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
507 /// previously placed in the holding cell (and has since been removed).
509 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
510 monitor_update: ChannelMonitorUpdate,
511 /// The value of the HTLC which was claimed, in msat.
512 htlc_value_msat: u64,
514 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
515 /// or has been forgotten (presumably previously claimed).
519 /// The return value of `monitor_updating_restored`
520 pub(super) struct MonitorRestoreUpdates {
// The revoke_and_ack to send, if any.
521 pub raa: Option<msgs::RevokeAndACK>,
// The commitment update to send, if any.
522 pub commitment_update: Option<msgs::CommitmentUpdate>,
// Which of `raa`/`commitment_update` must be sent first (see `RAACommitmentOrder`).
523 pub order: RAACommitmentOrder,
524 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
525 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
526 pub finalized_claimed_htlcs: Vec<HTLCSource>,
// The funding transaction to broadcast, if any.
527 pub funding_broadcastable: Option<Transaction>,
528 pub channel_ready: Option<msgs::ChannelReady>,
529 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
532 /// The return value of `signer_maybe_unblocked`
// Each field is presumably a message which became sendable once the signer
// unblocked, if any — confirm against `signer_maybe_unblocked`.
534 pub(super) struct SignerResumeUpdates {
535 pub commitment_update: Option<msgs::CommitmentUpdate>,
536 pub funding_signed: Option<msgs::FundingSigned>,
537 pub funding_created: Option<msgs::FundingCreated>,
538 pub channel_ready: Option<msgs::ChannelReady>,
541 /// The return value of `channel_reestablish`
542 pub(super) struct ReestablishResponses {
543 pub channel_ready: Option<msgs::ChannelReady>,
// The revoke_and_ack to (re-)send, if any.
544 pub raa: Option<msgs::RevokeAndACK>,
545 pub commitment_update: Option<msgs::CommitmentUpdate>,
// Which of `raa`/`commitment_update` must be sent first (see `RAACommitmentOrder`).
546 pub order: RAACommitmentOrder,
547 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
548 pub shutdown_msg: Option<msgs::Shutdown>,
551 /// The result of a shutdown that should be handled.
553 pub(crate) struct ShutdownResult {
554 /// A channel monitor update to apply.
// NOTE(review): the tuple appears to be (counterparty node pubkey, funding
// outpoint, update) — confirm against the call sites.
555 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
556 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
557 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
558 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
559 /// propagated to the remainder of the batch.
560 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
563 /// If the majority of the channels funds are to the fundee and the initiator holds only just
564 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
565 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
566 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
567 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
568 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
569 /// by this multiple without hitting this case, before sending.
570 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
571 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
572 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
573 /// leave the channel less usable as we hold a bigger reserve.
// Duplicated under two cfg gates so the constant can be `pub` (visible to tests
// and fuzzers) without exposing it in release builds; the value is identical.
574 #[cfg(any(fuzzing, test))]
575 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
576 #[cfg(not(any(fuzzing, test)))]
577 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
579 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
580 /// channel creation on an inbound channel, we simply force-close and move on.
581 /// This constant is the one suggested in BOLT 2.
582 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
584 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
585 /// not have enough balance value remaining to cover the onchain cost of this new
586 /// HTLC weight. If this happens, our counterparty fails the reception of our
587 /// commitment_signed including this new HTLC due to infringement on the channel
589 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
590 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
591 /// leads to a channel force-close. Ultimately, this is an issue coming from the
592 /// design of LN state machines, allowing asynchronous updates.
593 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
595 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
596 /// commitment transaction fees, with at least this many HTLCs present on the commitment
597 /// transaction (not counting the value of the HTLCs themselves).
598 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
600 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
601 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
602 /// ChannelUpdate prompted by the config update. This value was determined as follows:
604 /// * The expected interval between ticks (1 minute).
605 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
606 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
607 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
608 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
610 /// The number of ticks that may elapse while we're waiting for a response to a
611 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
614 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
615 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
617 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
618 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
619 /// exceeding this age limit will be force-closed and purged from memory.
620 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
622 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
623 pub(crate) const COINBASE_MATURITY: u32 = 100;
/// A `ChannelMonitorUpdate` which is pending application, wrapped so it can be
/// serialized; see the `impl_writeable_tlv_based!` below.
625 struct PendingChannelMonitorUpdate {
626 update: ChannelMonitorUpdate,
// Serialization: the update is written as TLV type 0 and is required when reading.
629 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
630 (0, update, required),
633 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
634 /// its variants containing an appropriate channel struct.
635 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
636 UnfundedOutboundV1(OutboundV1Channel<SP>),
637 UnfundedInboundV1(InboundV1Channel<SP>),
// NOTE(review): a `Funded(..)` variant is matched in the impl below but its
// declaration is not visible in this excerpt — confirm against the full file.
641 impl<'a, SP: Deref> ChannelPhase<SP> where
642 SP::Target: SignerProvider,
643 <SP::Target as SignerProvider>::Signer: ChannelSigner,
/// Returns a shared reference to this phase's common `ChannelContext`, regardless
/// of which phase the channel is currently in.
645 pub fn context(&'a self) -> &'a ChannelContext<SP> {
647 ChannelPhase::Funded(chan) => &chan.context,
648 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
649 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
/// Returns an exclusive reference to this phase's common `ChannelContext`.
653 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
655 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
656 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
657 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
662 /// Contains all state common to unfunded inbound/outbound channels.
663 pub(super) struct UnfundedChannelContext {
664 /// A counter tracking how many ticks have elapsed since this unfunded channel was
665 /// created. If the peer has yet to respond and this counter reaches
666 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
668 /// This is so that we don't keep channels around that haven't progressed to a funded state
669 /// in a timely manner.
670 unfunded_channel_age_ticks: usize,
673 impl UnfundedChannelContext {
674 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
675 /// having reached the unfunded channel age limit.
677 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
/// Note: this increments the age counter as a side effect, so each call ages the
/// channel by one tick — calling it more than once per tick over-ages the channel.
678 pub fn should_expire_unfunded_channel(&mut self) -> bool {
679 self.unfunded_channel_age_ticks += 1;
680 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	// NOTE(review): the following pair of identically-named fields cannot both compile as-is;
	// presumably they were gated by `#[cfg(test)]` / `#[cfg(not(test))]` attributes that were
	// lost in this copy — restore them. TODO confirm against upstream.
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	// NOTE(review): each `pub(super)`/private duplicated-field pair below is presumably gated
	// by `#[cfg(test)]` / `#[cfg(not(test))]` attributes lost in this copy — restore them.
	pub(super) holder_dust_limit_satoshis: u64,
	holder_dust_limit_satoshis: u64,

	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	counterparty_max_htlc_value_in_flight_msat: u64,

	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	pub(super) holder_selected_channel_reserve_satoshis: u64,
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	pub counterparty_max_accepted_htlcs: u16,
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
	// NOTE(review): the struct's closing brace appears to have been lost in this copy.
973 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
974 /// Allowed in any state (including after shutdown)
975 pub fn get_update_time_counter(&self) -> u32 {
976 self.update_time_counter
979 pub fn get_latest_monitor_update_id(&self) -> u64 {
980 self.latest_monitor_update_id
983 pub fn should_announce(&self) -> bool {
984 self.config.announced_channel
987 pub fn is_outbound(&self) -> bool {
988 self.channel_transaction_parameters.is_outbound_from_holder
991 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
992 /// Allowed in any state (including after shutdown)
993 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
994 self.config.options.forwarding_fee_base_msat
997 /// Returns true if we've ever received a message from the remote end for this Channel
998 pub fn have_received_message(&self) -> bool {
999 self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
1002 /// Returns true if this channel is fully established and not known to be closing.
1003 /// Allowed in any state (including after shutdown)
1004 pub fn is_usable(&self) -> bool {
1005 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
1006 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
1009 /// shutdown state returns the state of the channel in its various stages of shutdown
1010 pub fn shutdown_state(&self) -> ChannelShutdownState {
1011 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
1012 return ChannelShutdownState::ShutdownComplete;
1014 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
1015 return ChannelShutdownState::ShutdownInitiated;
1017 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
1018 return ChannelShutdownState::ResolvingHTLCs;
1020 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1021 return ChannelShutdownState::NegotiatingClosingFee;
1023 return ChannelShutdownState::NotShuttingDown;
1026 fn closing_negotiation_ready(&self) -> bool {
1027 self.pending_inbound_htlcs.is_empty() &&
1028 self.pending_outbound_htlcs.is_empty() &&
1029 self.pending_update_fee.is_none() &&
1030 self.channel_state &
1031 (BOTH_SIDES_SHUTDOWN_MASK |
1032 ChannelState::AwaitingRemoteRevoke as u32 |
1033 ChannelState::PeerDisconnected as u32 |
1034 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1037 /// Returns true if this channel is currently available for use. This is a superset of
1038 /// is_usable() and considers things like the channel being temporarily disabled.
1039 /// Allowed in any state (including after shutdown)
1040 pub fn is_live(&self) -> bool {
1041 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1044 // Public utilities:
1046 pub fn channel_id(&self) -> ChannelId {
1050 // Return the `temporary_channel_id` used during channel establishment.
1052 // Will return `None` for channels created prior to LDK version 0.0.115.
1053 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1054 self.temporary_channel_id
1057 pub fn minimum_depth(&self) -> Option<u32> {
1061 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1062 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1063 pub fn get_user_id(&self) -> u128 {
1067 /// Gets the channel's type
1068 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1072 /// Gets the channel's `short_channel_id`.
1074 /// Will return `None` if the channel hasn't been confirmed yet.
1075 pub fn get_short_channel_id(&self) -> Option<u64> {
1076 self.short_channel_id
1079 /// Allowed in any state (including after shutdown)
1080 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1081 self.latest_inbound_scid_alias
1084 /// Allowed in any state (including after shutdown)
1085 pub fn outbound_scid_alias(&self) -> u64 {
1086 self.outbound_scid_alias
1089 /// Returns the holder signer for this channel.
1091 pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
1092 return &self.holder_signer
1095 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1096 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1097 /// or prior to any channel actions during `Channel` initialization.
1098 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1099 debug_assert_eq!(self.outbound_scid_alias, 0);
1100 self.outbound_scid_alias = outbound_scid_alias;
1103 /// Returns the funding_txo we either got from our peer, or were given by
1104 /// get_funding_created.
1105 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1106 self.channel_transaction_parameters.funding_outpoint
1109 /// Returns the block hash in which our funding transaction was confirmed.
1110 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1111 self.funding_tx_confirmed_in
1114 /// Returns the current number of confirmations on the funding transaction.
1115 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1116 if self.funding_tx_confirmation_height == 0 {
1117 // We either haven't seen any confirmation yet, or observed a reorg.
1121 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1124 fn get_holder_selected_contest_delay(&self) -> u16 {
1125 self.channel_transaction_parameters.holder_selected_contest_delay
1128 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1129 &self.channel_transaction_parameters.holder_pubkeys
1132 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1133 self.channel_transaction_parameters.counterparty_parameters
1134 .as_ref().map(|params| params.selected_contest_delay)
1137 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1138 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1141 /// Allowed in any state (including after shutdown)
1142 pub fn get_counterparty_node_id(&self) -> PublicKey {
1143 self.counterparty_node_id
1146 /// Allowed in any state (including after shutdown)
1147 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1148 self.holder_htlc_minimum_msat
1151 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1152 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1153 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1156 /// Allowed in any state (including after shutdown)
1157 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1159 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1160 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1161 // channel might have been used to route very small values (either by honest users or as DoS).
1162 self.channel_value_satoshis * 1000 * 9 / 10,
1164 self.counterparty_max_htlc_value_in_flight_msat
1168 /// Allowed in any state (including after shutdown)
1169 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1170 self.counterparty_htlc_minimum_msat
1173 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1174 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1175 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1178 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1179 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1180 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1182 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1183 party_max_htlc_value_in_flight_msat
1188 pub fn get_value_satoshis(&self) -> u64 {
1189 self.channel_value_satoshis
1192 pub fn get_fee_proportional_millionths(&self) -> u32 {
1193 self.config.options.forwarding_fee_proportional_millionths
1196 pub fn get_cltv_expiry_delta(&self) -> u16 {
1197 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1200 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1201 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1202 where F::Target: FeeEstimator
1204 match self.config.options.max_dust_htlc_exposure {
1205 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1206 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1207 ConfirmationTarget::OnChainSweep) as u64;
1208 feerate_per_kw.saturating_mul(multiplier)
1210 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1214 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1215 pub fn prev_config(&self) -> Option<ChannelConfig> {
1216 self.prev_config.map(|prev_config| prev_config.0)
1219 // Checks whether we should emit a `ChannelPending` event.
1220 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1221 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1224 // Returns whether we already emitted a `ChannelPending` event.
1225 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1226 self.channel_pending_event_emitted
1229 // Remembers that we already emitted a `ChannelPending` event.
1230 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1231 self.channel_pending_event_emitted = true;
1234 // Checks whether we should emit a `ChannelReady` event.
1235 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1236 self.is_usable() && !self.channel_ready_event_emitted
1239 // Remembers that we already emitted a `ChannelReady` event.
1240 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1241 self.channel_ready_event_emitted = true;
1244 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1245 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1246 /// no longer be considered when forwarding HTLCs.
1247 pub fn maybe_expire_prev_config(&mut self) {
1248 if self.prev_config.is_none() {
1251 let prev_config = self.prev_config.as_mut().unwrap();
1253 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1254 self.prev_config = None;
1258 /// Returns the current [`ChannelConfig`] applied to the channel.
1259 pub fn config(&self) -> ChannelConfig {
1263 /// Updates the channel's config. A bool is returned indicating whether the config update
1264 /// applied resulted in a new ChannelUpdate message.
1265 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1266 let did_channel_update =
1267 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1268 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1269 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1270 if did_channel_update {
1271 self.prev_config = Some((self.config.options, 0));
1272 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1273 // policy change to propagate throughout the network.
1274 self.update_time_counter += 1;
1276 self.config.options = *config;
1280 /// Returns true if funding_signed was sent/received and the
1281 /// funding transaction has been broadcast if necessary.
1282 pub fn is_funding_broadcast(&self) -> bool {
1283 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1284 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1287 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1288 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1289 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1290 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1291 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1293 /// @local is used only to convert relevant internal structures which refer to remote vs local
1294 /// to decide value of outputs and direction of HTLCs.
1295 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1296 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1297 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1298 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1299 /// which peer generated this transaction and "to whom" this transaction flows.
1301 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1302 where L::Target: Logger
1304 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1305 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1306 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1308 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1309 let mut remote_htlc_total_msat = 0;
1310 let mut local_htlc_total_msat = 0;
1311 let mut value_to_self_msat_offset = 0;
1313 let mut feerate_per_kw = self.feerate_per_kw;
1314 if let Some((feerate, update_state)) = self.pending_update_fee {
1315 if match update_state {
1316 // Note that these match the inclusion criteria when scanning
1317 // pending_inbound_htlcs below.
1318 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1319 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1320 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1322 feerate_per_kw = feerate;
1326 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1327 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1328 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1330 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1332 macro_rules! get_htlc_in_commitment {
1333 ($htlc: expr, $offered: expr) => {
1334 HTLCOutputInCommitment {
1336 amount_msat: $htlc.amount_msat,
1337 cltv_expiry: $htlc.cltv_expiry,
1338 payment_hash: $htlc.payment_hash,
1339 transaction_output_index: None
1344 macro_rules! add_htlc_output {
1345 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1346 if $outbound == local { // "offered HTLC output"
1347 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1348 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1351 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1353 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1354 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1355 included_non_dust_htlcs.push((htlc_in_tx, $source));
1357 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1358 included_dust_htlcs.push((htlc_in_tx, $source));
1361 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1362 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1365 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1367 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1368 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1369 included_non_dust_htlcs.push((htlc_in_tx, $source));
1371 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1372 included_dust_htlcs.push((htlc_in_tx, $source));
1378 for ref htlc in self.pending_inbound_htlcs.iter() {
1379 let (include, state_name) = match htlc.state {
1380 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1381 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1382 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1383 InboundHTLCState::Committed => (true, "Committed"),
1384 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1388 add_htlc_output!(htlc, false, None, state_name);
1389 remote_htlc_total_msat += htlc.amount_msat;
1391 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1393 &InboundHTLCState::LocalRemoved(ref reason) => {
1394 if generated_by_local {
1395 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1396 value_to_self_msat_offset += htlc.amount_msat as i64;
1405 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1407 for ref htlc in self.pending_outbound_htlcs.iter() {
1408 let (include, state_name) = match htlc.state {
1409 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1410 OutboundHTLCState::Committed => (true, "Committed"),
1411 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1412 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1413 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1416 let preimage_opt = match htlc.state {
1417 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1418 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1419 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1423 if let Some(preimage) = preimage_opt {
1424 preimages.push(preimage);
1428 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1429 local_htlc_total_msat += htlc.amount_msat;
1431 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1433 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1434 value_to_self_msat_offset -= htlc.amount_msat as i64;
1436 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1437 if !generated_by_local {
1438 value_to_self_msat_offset -= htlc.amount_msat as i64;
1446 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1447 assert!(value_to_self_msat >= 0);
1448 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1449 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1450 // "violate" their reserve value by couting those against it. Thus, we have to convert
1451 // everything to i64 before subtracting as otherwise we can overflow.
1452 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1453 assert!(value_to_remote_msat >= 0);
1455 #[cfg(debug_assertions)]
1457 // Make sure that the to_self/to_remote is always either past the appropriate
1458 // channel_reserve *or* it is making progress towards it.
1459 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1460 self.holder_max_commitment_tx_output.lock().unwrap()
1462 self.counterparty_max_commitment_tx_output.lock().unwrap()
1464 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1465 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1466 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1467 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1470 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1471 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1472 let (value_to_self, value_to_remote) = if self.is_outbound() {
1473 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1475 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1478 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1479 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1480 let (funding_pubkey_a, funding_pubkey_b) = if local {
1481 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1483 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1486 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1487 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1492 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1493 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1498 let num_nondust_htlcs = included_non_dust_htlcs.len();
1500 let channel_parameters =
1501 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1502 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1503 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1510 &mut included_non_dust_htlcs,
1513 let mut htlcs_included = included_non_dust_htlcs;
1514 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1515 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1516 htlcs_included.append(&mut included_dust_htlcs);
1518 // For the stats, trimmed-to-0 the value in msats accordingly
1519 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1520 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1528 local_balance_msat: value_to_self_msat as u64,
1529 remote_balance_msat: value_to_remote_msat as u64,
1535 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1536 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1537 /// our counterparty!)
1538 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1539 /// TODO Some magic rust shit to compile-time check this?
1540 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
// Ask our signer for the per-commitment point at this commitment number, then combine our
// delayed-payment/HTLC basepoints with the counterparty's revocation/HTLC basepoints — the
// mirror image of build_remote_transaction_keys below.
1541 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1542 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1543 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1544 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1546 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1550 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1551 /// will sign and send to our counterparty.
1552 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1553 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1554 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1555 //may see payments to it!
// Combines our revocation/HTLC basepoints with the counterparty's current per-commitment
// point and their delayed-payment/HTLC basepoints.
1556 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1557 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1558 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// NOTE(review): the unwrap assumes counterparty_cur_commitment_point is always Some by the
// time we need remote keys — presumably established during channel open/accept; confirm.
1560 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1563 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1564 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1565 /// Panics if called before accept_channel/InboundV1Channel::new
1566 pub fn get_funding_redeemscript(&self) -> Script {
// 2-of-2 multisig over the holder and counterparty funding pubkeys (see chan_utils).
1567 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
/// Returns a reference to the counterparty's funding pubkey from the stored channel pubkeys.
1570 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1571 &self.get_counterparty_pubkeys().funding_pubkey
/// Accessor for the channel's current commitment feerate, in sats per 1000 weight units.
/// NOTE(review): the function body is elided in this view — presumably returns
/// `self.feerate_per_kw`; confirm against the full file.
1574 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
/// Returns a conservatively-buffered feerate used when computing dust exposure, taking the
/// max of the current feerate, any pending `update_fee`, and an optional caller-supplied
/// outbound feerate update.
1578 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1579 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1580 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1581 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1582 // more dust balance if the feerate increases when we have several HTLCs pending
1583 // which are near the dust limit.
1584 let mut feerate_per_kw = self.feerate_per_kw;
1585 // If there's a pending update fee, use it to ensure we aren't under-estimating
1586 // potential feerate updates coming soon.
1587 if let Some((feerate, _)) = self.pending_update_fee {
1588 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1590 if let Some(feerate) = outbound_feerate_update {
1591 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// Floor at 2530 sat/kWU (~10 sat/vB); otherwise add 25% headroom (* 1250 / 1000).
1593 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1596 /// Get forwarding information for the counterparty.
/// Returns a clone of the stored `CounterpartyForwardingInfo`, if any has been received.
1597 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1598 self.counterparty_forwarding_info.clone()
1601 /// Returns a HTLCStats about inbound pending htlcs
/// Tallies the total pending inbound HTLC value and how much of it counts as dust exposure
/// on each side's commitment transaction, using the (optionally updated) buffered feerate.
1602 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
// NOTE(review): `context` is bound from `self` on an elided line in this view — confirm.
1604 let mut stats = HTLCStats {
1605 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1606 pending_htlcs_value_msat: 0,
1607 on_counterparty_tx_dust_exposure_msat: 0,
1608 on_holder_tx_dust_exposure_msat: 0,
1609 holding_cell_msat: 0,
1610 on_holder_tx_holding_cell_htlcs_count: 0,
// For non-anchor channels the effective dust limit includes the second-stage HTLC-tx fee at
// the buffered feerate. The anchor branch's value is on elided lines here — presumably
// (0, 0) since anchor channels have zero-fee HTLC txs; confirm.
1613 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1616 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1617 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1618 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Inbound HTLCs are claimed via HTLC-timeout on the counterparty's tx and HTLC-success on
// ours, hence the asymmetric dust limits below.
1620 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1621 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1622 for ref htlc in context.pending_inbound_htlcs.iter() {
1623 stats.pending_htlcs_value_msat += htlc.amount_msat;
1624 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1625 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1627 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1628 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1634 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1635 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
// NOTE(review): `context` is bound from `self` on an elided line in this view — confirm.
1637 let mut stats = HTLCStats {
1638 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1639 pending_htlcs_value_msat: 0,
1640 on_counterparty_tx_dust_exposure_msat: 0,
1641 on_holder_tx_dust_exposure_msat: 0,
1642 holding_cell_msat: 0,
1643 on_holder_tx_holding_cell_htlcs_count: 0,
// Non-anchor channels fold the second-stage HTLC-tx fee (at the buffered feerate) into the
// effective dust limit; the anchor branch is on elided lines here.
1646 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1649 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1650 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1651 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Outbound HTLCs are claimed via HTLC-success on the counterparty's tx and HTLC-timeout on
// ours — note the dust limits are swapped relative to get_inbound_pending_htlc_stats.
1653 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1654 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1655 for ref htlc in context.pending_outbound_htlcs.iter() {
1656 stats.pending_htlcs_value_msat += htlc.amount_msat;
1657 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1658 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1660 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1661 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// HTLC adds queued in the holding cell count toward all totals since they will be sent as
// soon as we're able; claims/fails awaiting ack are ignored here.
1665 for update in context.holding_cell_htlc_updates.iter() {
1666 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1667 stats.pending_htlcs += 1;
1668 stats.pending_htlcs_value_msat += amount_msat;
1669 stats.holding_cell_msat += amount_msat;
1670 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1671 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1673 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1674 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1676 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1683 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1684 /// Doesn't bother handling the
1685 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1686 /// corner case properly.
/// Computes how much we can currently send/receive, constrained by the channel reserves,
/// commitment-tx fees (payable by the funder), anchor output values, dust-exposure limits,
/// and the counterparty's max-in-flight / max-accepted-HTLC limits.
1687 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1688 -> AvailableBalances
1689 where F::Target: FeeEstimator
1691 let context = &self;
1692 // Note that we have to handle overflow due to the above case.
1693 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1694 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
// Our claimable balance: our current balance plus inbound HTLCs we've already fulfilled
// (just awaiting the final RAA), minus everything we have pending outbound.
1696 let mut balance_msat = context.value_to_self_msat;
1697 for ref htlc in context.pending_inbound_htlcs.iter() {
1698 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1699 balance_msat += htlc.amount_msat;
1702 balance_msat -= outbound_stats.pending_htlcs_value_msat;
// Outbound capacity: our balance less pending outbound HTLCs and the reserve the
// counterparty selected for us (saturating — see the overflow note above).
1704 let outbound_capacity_msat = context.value_to_self_msat
1705 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1707 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1709 let mut available_capacity_msat = outbound_capacity_msat;
// Anchor channels lock up two ANCHOR_OUTPUT_VALUE_SATOSHI outputs, paid by the funder.
1711 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1712 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1716 if context.is_outbound() {
1717 // We should mind channel commit tx fee when computing how much of the available capacity
1718 // can be used in the next htlc. Mirrors the logic in send_htlc.
1720 // The fee depends on whether the amount we will be sending is above dust or not,
1721 // and the answer will in turn change the amount itself — making it a circular
1723 // This complicates the computation around dust-values, up to the one-htlc-value.
1724 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1725 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1726 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
// Probe the commit-tx fee both with a just-above-dust HTLC and a just-below-dust HTLC;
// the difference is exactly the fee for one additional non-dust HTLC.
1729 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1730 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1731 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1732 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1733 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1734 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1735 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1738 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1739 // value ends up being below dust, we have this fee available again. In that case,
1740 // match the value to right-below-dust.
1741 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1742 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1743 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1744 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1745 debug_assert!(one_htlc_difference_msat != 0);
1746 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1747 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1748 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1750 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1753 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1754 // sending a new HTLC won't reduce their balance below our reserve threshold.
1755 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1756 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1757 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1760 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1761 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1763 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1764 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1765 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1767 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1768 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1769 // we've selected for them, we can only send dust HTLCs.
1770 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1774 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1776 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1777 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1778 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1779 // send above the dust limit (as the router can always overpay to meet the dust limit).
1780 let mut remaining_msat_below_dust_exposure_limit = None;
1781 let mut dust_exposure_dust_limit_msat = 0;
1782 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1784 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1785 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1787 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1788 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1789 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Check whether one more maximally-dusty HTLC would push either side's dust exposure past
// the configured cap; if so, record how much dusty value we can still add.
1791 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1792 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1793 remaining_msat_below_dust_exposure_limit =
1794 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1795 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1798 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1799 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1800 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1801 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1802 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1803 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1806 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1807 if available_capacity_msat < dust_exposure_dust_limit_msat {
1808 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1810 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Finally cap by the counterparty's max-in-flight value and max accepted HTLC count.
1814 available_capacity_msat = cmp::min(available_capacity_msat,
1815 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1817 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1818 available_capacity_msat = 0;
// Inbound capacity: the counterparty's balance less their pending HTLCs and the reserve we
// require of them, floored (via cmp::max on elided lines) to avoid going negative.
1822 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1823 - context.value_to_self_msat as i64
1824 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1825 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1827 outbound_capacity_msat,
1828 next_outbound_htlc_limit_msat: available_capacity_msat,
1829 next_outbound_htlc_minimum_msat,
/// Returns the channel reserve we require of the counterparty and (if known) the reserve
/// they require of us, as `(holder_selected, counterparty_selected)` in satoshis.
1834 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1835 let context = &self;
1836 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1839 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1840 /// number of pending HTLCs that are on track to be in our next commitment tx.
1842 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1843 /// `fee_spike_buffer_htlc` is `Some`.
1845 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1846 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1848 /// Dust HTLCs are excluded.
1849 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1850 let context = &self;
// Only the funder pays commitment fees, so this only applies to outbound channels.
1851 assert!(context.is_outbound());
// Effective dust limits on OUR commitment tx: for non-anchor channels, add the second-stage
// HTLC-tx fee at the current feerate (anchor branch is on elided lines here).
1853 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1856 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1857 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1859 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1860 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
// Count the candidate HTLC (if non-dust) and the optional fee-spike-buffer HTLC.
1862 let mut addl_htlcs = 0;
1863 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1865 HTLCInitiator::LocalOffered => {
1866 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1870 HTLCInitiator::RemoteOffered => {
1871 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
// Count existing non-dust HTLCs that will appear on our next commitment tx.
1877 let mut included_htlcs = 0;
1878 for ref htlc in context.pending_inbound_htlcs.iter() {
1879 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1882 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1883 // transaction including this HTLC if it times out before they RAA.
1884 included_htlcs += 1;
1887 for ref htlc in context.pending_outbound_htlcs.iter() {
1888 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1892 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1893 OutboundHTLCState::Committed => included_htlcs += 1,
1894 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1895 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1896 // transaction won't be generated until they send us their next RAA, which will mean
1897 // dropping any HTLCs in this state.
// Holding-cell adds also land on our next commitment tx once freed.
1902 for htlc in context.holding_cell_htlc_updates.iter() {
1904 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1905 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1910 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1911 // ack we're guaranteed to never include them in commitment txs anymore.
1915 let num_htlcs = included_htlcs + addl_htlcs;
1916 let res = commit_tx_fee_sat(context.feerate_per_kw, num_htlcs, &context.channel_type) * 1000;
// Test/fuzzing only: cache the fee computation (excluding any spike-buffer HTLC) so other
// code paths can cross-check it.
1917 #[cfg(any(test, fuzzing))]
1920 if fee_spike_buffer_htlc.is_some() {
1921 fee = commit_tx_fee_sat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type) * 1000;
1923 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1924 + context.holding_cell_htlc_updates.len();
1925 let commitment_tx_info = CommitmentTxInfoCached {
1927 total_pending_htlcs,
1928 next_holder_htlc_id: match htlc.origin {
1929 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1930 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1932 next_counterparty_htlc_id: match htlc.origin {
1933 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1934 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1936 feerate: context.feerate_per_kw,
1938 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1943 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1944 /// pending HTLCs that are on track to be in their next commitment tx
1946 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1947 /// `fee_spike_buffer_htlc` is `Some`.
1949 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1950 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1952 /// Dust HTLCs are excluded.
1953 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1954 let context = &self;
// The counterparty pays commitment fees, so this only applies to inbound channels.
1955 assert!(!context.is_outbound());
// Effective dust limits on THEIR commitment tx, using their dust limit (anchor branch is on
// elided lines here). Note success/timeout roles are mirrored vs the local variant.
1957 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1960 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1961 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1963 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1964 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1966 let mut addl_htlcs = 0;
1967 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1969 HTLCInitiator::LocalOffered => {
1970 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1974 HTLCInitiator::RemoteOffered => {
1975 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1981 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
1982 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
1983 // committed outbound HTLCs, see below.
1984 let mut included_htlcs = 0;
1985 for ref htlc in context.pending_inbound_htlcs.iter() {
1986 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
1989 included_htlcs += 1;
1992 for ref htlc in context.pending_outbound_htlcs.iter() {
1993 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
1996 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
1997 // i.e. if they've responded to us with an RAA after announcement.
1999 OutboundHTLCState::Committed => included_htlcs += 1,
2000 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2001 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2006 let num_htlcs = included_htlcs + addl_htlcs;
2007 let res = commit_tx_fee_sat(context.feerate_per_kw, num_htlcs, &context.channel_type) * 1000;
// Test/fuzzing only: cache the fee computation (excluding any spike-buffer HTLC) so other
// code paths can cross-check it.
2008 #[cfg(any(test, fuzzing))]
2011 if fee_spike_buffer_htlc.is_some() {
2012 fee = commit_tx_fee_sat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type) * 1000;
2014 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2015 let commitment_tx_info = CommitmentTxInfoCached {
2017 total_pending_htlcs,
2018 next_holder_htlc_id: match htlc.origin {
2019 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2020 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2022 next_counterparty_htlc_id: match htlc.origin {
2023 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2024 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2026 feerate: context.feerate_per_kw,
2028 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
/// Runs `f` only while the funding transaction has not yet been broadcast — i.e. while the
/// channel state includes FundingCreated or WaitingForBatch.
/// NOTE(review): the non-matching branch and return are on elided lines in this view —
/// presumably returns `f()` in the matching case and `None` otherwise; confirm.
2033 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2034 where F: Fn() -> Option<O> {
2035 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2036 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2043 /// Returns the transaction if there is a pending funding transaction that is yet to be
/// broadcast (see `if_unbroadcasted_funding`).
2045 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2046 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2049 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
/// broadcast, taken from the funding outpoint in the channel transaction parameters.
2051 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2052 self.if_unbroadcasted_funding(||
2053 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2057 /// Returns whether the channel is funded in a batch.
/// True iff the `is_batch_funding` marker field is set.
2058 pub fn is_batch_funding(&self) -> bool {
2059 self.is_batch_funding.is_some()
2062 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
/// broadcast — i.e. `unbroadcasted_funding_txid` restricted to batch-funded channels.
2064 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2065 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2068 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2069 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2070 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2071 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2072 /// immediately (others we will have to allow to time out).
2073 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2074 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2075 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2076 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2077 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
// Calling force_shutdown twice is a programming error.
2078 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2080 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2081 // return them to fail the payment.
2082 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2083 let counterparty_node_id = self.get_counterparty_node_id();
2084 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2086 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2087 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2092 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2093 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2094 // returning a channel monitor update here would imply a channel monitor update before
2095 // we even registered the channel monitor to begin with, which is invalid.
2096 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2097 // funding transaction, don't return a funding txo (which prevents providing the
2098 // monitor update to the user, even if we return one).
2099 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2100 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
// Force-closure pins the monitor at the terminal CLOSED_CHANNEL_UPDATE_ID.
2101 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2102 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2103 update_id: self.latest_monitor_update_id,
2104 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// Capture the batch funding txid before flipping state, for the ShutdownResult below.
2108 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2110 self.channel_state = ChannelState::ShutdownComplete as u32;
2111 self.update_time_counter += 1;
2114 dropped_outbound_htlcs,
2115 unbroadcasted_batch_funding_txid,
2119 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
/// Returns `None` if the signer cannot currently produce the counterparty commitment
/// signature (e.g. an asynchronous signer); the message must then be retried later.
2120 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2121 let counterparty_keys = self.build_remote_transaction_keys();
// Build the counterparty's initial commitment transaction, which we must sign and hand
// over in `funding_created`.
2122 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2123 let signature = match &self.holder_signer {
2124 // TODO (taproot|arik): move match into calling method for Taproot
2125 ChannelSignerType::Ecdsa(ecdsa) => {
// `.ok()?` bails out with `None` when the signature is not (yet) available.
2126 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2127 .map(|(sig, _)| sig).ok()?
// Getting here means the signature is in hand, so any previously-recorded pending-signer
// state can be cleared (see the log message below).
2131 if self.signer_pending_funding {
2132 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2133 self.signer_pending_funding = false;
2136 Some(msgs::FundingCreated {
// Both unwraps are justified by the doc requirement above: the funding outpoint (and thus
// the temporary channel id phase) must be set before calling this.
2137 temporary_channel_id: self.temporary_channel_id.unwrap(),
2138 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2139 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2142 partial_signature_with_nonce: None,
2144 next_local_nonce: None,
2148 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
/// Returns the counterparty's initial commitment transaction and, if the signer produced a
/// signature, the `funding_signed` message to send. A `None` message leaves
/// `signer_pending_funding` set so the send can be retried when the signer is ready.
2149 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2150 let counterparty_keys = self.build_remote_transaction_keys();
// NOTE(review): the `+ 1` suggests the counterparty commitment number has already been
// advanced by the time this runs, so we rebuild the *initial* commitment tx — confirm
// against the `funding_created` handling.
2151 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2153 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2154 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2155 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2156 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2158 match &self.holder_signer {
2159 // TODO (arik): move match into calling method for Taproot
2160 ChannelSignerType::Ecdsa(ecdsa) => {
// Sign their commitment; a signer error maps to a `None` message rather than failure.
2161 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2162 .map(|(signature, _)| msgs::FundingSigned {
2163 channel_id: self.channel_id(),
2166 partial_signature_with_nonce: None,
// Track whether we still owe the peer a funding_signed once the signer catches up.
2170 if funding_signed.is_none() {
2171 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2172 self.signer_pending_funding = true;
2173 } else if self.signer_pending_funding {
2174 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2175 self.signer_pending_funding = false;
2178 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2179 (counterparty_initial_commitment_tx, funding_signed)
2185 // Internal utility functions for channels
2187 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2188 /// `channel_value_satoshis` in msat, set through
2189 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2191 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2193 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2194 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
// Clamp the configured percentage into [1, 100] (the out-of-range branch bodies supply the
// clamped constants).
2195 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2197 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2200 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
// value_sat * 1000 (to msat) * percent / 100 simplifies to value_sat * 10 * percent.
2202 channel_value_satoshis * 10 * configured_percent
2205 /// Returns a minimum channel reserve value the remote needs to maintain,
2206 /// required by us according to the configured or default
2207 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2209 /// Guaranteed to return a value no larger than channel_value_satoshis
2211 /// This is used both for outbound and inbound channels and has lower bound
2212 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2213 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
// Proportional reserve in sats; `saturating_mul` guards against overflow for very large
// channel values before dividing out the millionths.
2214 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
// Floor at MIN_THEIR_CHAN_RESERVE_SATOSHIS, then cap at the full channel value.
2215 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2218 /// This is for legacy reasons, present for forward-compatibility.
2219 /// LDK versions older than 0.0.104 don't know how read/handle values other than default
2220 /// from storage. Hence, we use this function to not persist default values of
2221 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2222 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
// q is 1% of the channel value; unsigned division by 100 can never actually overflow, so the
// overflow flag from `overflowing_div` is discarded.
2223 let (q, _) = channel_value_satoshis.overflowing_div(100);
// Legacy formula: 1% of the channel value, floored at 1000 sats, capped at the full value.
2224 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2227 // Holder designates channel data owned for the benefit of the user client.
2228 // Counterparty designates channel data owned by the other channel participant entity.
2229 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
// All channel state lives in the shared context; `Channel` wraps it with the state-machine
// methods for a funded channel.
2230 pub context: ChannelContext<SP>,
// Cached per-commitment stats, compiled only for tests/fuzzing — presumably used to
// cross-check commitment fee computations between updates (confirm at use sites).
2233 #[cfg(any(test, fuzzing))]
2234 struct CommitmentTxInfoCached {
2236 total_pending_htlcs: usize,
2237 next_holder_htlc_id: u64,
2238 next_counterparty_htlc_id: u64,
2242 impl<SP: Deref> Channel<SP> where
2243 SP::Target: SignerProvider,
2244 <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
/// Validates a feerate proposed by the counterparty against our fee estimator's bounds,
/// returning a `ChannelError::Close` if it is unreasonably high or too low to be safe.
2246 fn check_remote_fee<F: Deref, L: Deref>(
2247 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2248 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2249 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2251 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2252 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2253 // We generally don't care too much if they set the feerate to something very high, but it
2254 // could result in the channel being useless due to everything being dust. This doesn't
2255 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2256 // zero fee, so their fee is no longer considered to determine dust limits.
2257 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
2259 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
2260 if feerate_per_kw as u64 > upper_limit {
2261 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
// Anchor channels can tolerate a lower commitment feerate since fees can be bumped via CPFP,
// hence the separate, lower confirmation target.
2265 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2266 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2268 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2270 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2271 if feerate_per_kw < lower_limit {
// A feerate below our floor is still tolerated if it moves in the right direction (higher
// than what the channel currently has) — see the log message below.
2272 if let Some(cur_feerate) = cur_feerate_per_kw {
2273 if feerate_per_kw > cur_feerate {
2275 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2276 cur_feerate, feerate_per_kw);
2280 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
/// Returns our shutdown scriptpubkey for cooperative close.
///
/// Panics (via `unwrap`) if the shutdown script has not been set — see the comment below for
/// when that is guaranteed.
2286 fn get_closing_scriptpubkey(&self) -> Script {
2287 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2288 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2289 // outside of those situations will fail.
2290 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
/// Computes the expected weight of the cooperative closing transaction, given the output
/// scripts that will actually be included (pass `None` for an output elided as dust).
/// The per-signature size assumes 71-byte DER signatures plus sighash flag.
2294 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2299 1 + // script length (0)
2303 )*4 + // * 4 for non-witness parts
2304 2 + // witness marker and flag
2305 1 + // witness element count
2306 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2307 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2308 2*(1 + 71); // two signatures + sighash type flags
// Each present output contributes its serialized size in non-witness bytes (weight x4).
2309 if let Some(spk) = a_scriptpubkey {
2310 ret += ((8+1) + // output values and script length
2311 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2313 if let Some(spk) = b_scriptpubkey {
2314 ret += ((8+1) + // output values and script length
2315 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
/// Builds the cooperative closing transaction for the proposed total fee, returning it along
/// with the (possibly adjusted) fee actually paid. The funder's output absorbs the fee; a
/// negative balance or a below-dust output is elided (value forced to 0).
///
/// Panics if any HTLCs or a fee update are still pending — close negotiation requires a
/// clean commitment state.
2321 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2322 assert!(self.context.pending_inbound_htlcs.is_empty());
2323 assert!(self.context.pending_outbound_htlcs.is_empty());
2324 assert!(self.context.pending_update_fee.is_none());
2326 let mut total_fee_satoshis = proposed_total_fee_satoshis;
// The fee is deducted from whichever side funded the channel (is_outbound == we funded it).
2327 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2328 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
// If the fee pushed the funder's balance negative, the funder can't pay the full proposed
// fee; reduce the effective fee by the shortfall instead.
2330 if value_to_holder < 0 {
2331 assert!(self.context.is_outbound());
2332 total_fee_satoshis += (-value_to_holder) as u64;
2333 } else if value_to_counterparty < 0 {
2334 assert!(!self.context.is_outbound());
2335 total_fee_satoshis += (-value_to_counterparty) as u64;
// Dust outputs are dropped (value 0 is interpreted as "no output" downstream).
2338 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2339 value_to_counterparty = 0;
2342 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2343 value_to_holder = 0;
2346 assert!(self.context.shutdown_scriptpubkey.is_some());
2347 let holder_shutdown_script = self.get_closing_scriptpubkey();
2348 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2349 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2351 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2352 (closing_transaction, total_fee_satoshis)
/// Returns the channel's funding outpoint. Panics if funding parameters have not been set.
2355 fn funding_outpoint(&self) -> OutPoint {
2356 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2359 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2362 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2363 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2365 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2367 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2368 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2369 where L::Target: Logger {
2370 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2371 // (see equivalent if condition there).
2372 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
// Save and restore latest_monitor_update_id around the call: the monitor update that
// `get_update_fulfill_htlc` builds is intentionally discarded (see doc above), so its id
// bump must not be observable.
2373 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2374 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2375 self.context.latest_monitor_update_id = mon_update_id;
2376 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2377 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
/// Attempts to fulfill an inbound HTLC with the given preimage, producing a
/// [`ChannelMonitorUpdate`] carrying the preimage and, when the channel is quiescent, an
/// `update_fulfill_htlc` message. If the channel cannot currently send updates the claim is
/// queued in the holding cell instead (`msg: None`). Duplicate claims are detected and
/// reported as `DuplicateClaim`.
2381 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2382 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2383 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2384 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2386 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2387 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2389 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2391 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2392 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2393 // these, but for now we just have to treat them as normal.
// Find the pending inbound HTLC being claimed; usize::MAX acts as "not found" sentinel.
2395 let mut pending_idx = core::usize::MAX;
2396 let mut htlc_value_msat = 0;
2397 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2398 if htlc.htlc_id == htlc_id_arg {
// The preimage must hash to the HTLC's payment hash — a mismatch is a caller bug.
2399 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
2400 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2401 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2403 InboundHTLCState::Committed => {},
2404 InboundHTLCState::LocalRemoved(ref reason) => {
2405 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2407 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2408 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2410 return UpdateFulfillFetch::DuplicateClaim {};
2413 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2414 // Don't return in release mode here so that we can update channel_monitor
2418 htlc_value_msat = htlc.amount_msat;
2422 if pending_idx == core::usize::MAX {
2423 #[cfg(any(test, fuzzing))]
2424 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2425 // this is simply a duplicate claim, not previously failed and we lost funds.
2426 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2427 return UpdateFulfillFetch::DuplicateClaim {};
2430 // Now update local state:
2432 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2433 // can claim it even if the channel hits the chain before we see their next commitment.
2434 self.context.latest_monitor_update_id += 1;
2435 let monitor_update = ChannelMonitorUpdate {
2436 update_id: self.context.latest_monitor_update_id,
2437 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2438 payment_preimage: payment_preimage_arg.clone(),
// If updates can't be sent right now (awaiting revoke, disconnected, or monitor update in
// flight), the claim goes into the holding cell instead of producing a message.
2442 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2443 // Note that this condition is the same as the assertion in
2444 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2445 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2446 // do not get into this branch.
2447 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2448 match pending_update {
2449 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2450 if htlc_id_arg == htlc_id {
2451 // Make sure we don't leave latest_monitor_update_id incremented here:
2452 self.context.latest_monitor_update_id -= 1;
2453 #[cfg(any(test, fuzzing))]
2454 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2455 return UpdateFulfillFetch::DuplicateClaim {};
2458 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2459 if htlc_id_arg == htlc_id {
2460 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2461 // TODO: We may actually be able to switch to a fulfill here, though its
2462 // rare enough it may not be worth the complexity burden.
2463 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2464 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2470 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2471 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2472 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2474 #[cfg(any(test, fuzzing))]
2475 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
// No message yet: the claim will be released with the rest of the holding cell.
2476 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2478 #[cfg(any(test, fuzzing))]
2479 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
// Channel is quiescent: transition the HTLC state and emit the fulfill message directly.
2482 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2483 if let InboundHTLCState::Committed = htlc.state {
2485 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2486 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2488 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2489 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2492 UpdateFulfillFetch::NewClaim {
2495 msg: Some(msgs::UpdateFulfillHTLC {
2496 channel_id: self.context.channel_id(),
2497 htlc_id: htlc_id_arg,
2498 payment_preimage: payment_preimage_arg,
/// Claims an HTLC via [`Self::get_update_fulfill_htlc`] and, when a fulfill message was
/// produced, immediately builds the follow-up commitment update, merging both into a single
/// [`ChannelMonitorUpdate`]. Handles re-sequencing of update ids when other monitor updates
/// are currently blocked.
2503 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2504 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2505 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2506 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2507 // Even if we aren't supposed to let new monitor updates with commitment state
2508 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2509 // matter what. Sadly, to push a new monitor update which flies before others
2510 // already queued, we have to insert it into the pending queue and update the
2511 // update_ids of all the following monitors.
2512 if release_cs_monitor && msg.is_some() {
2513 let mut additional_update = self.build_commitment_no_status_check(logger);
2514 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2515 // to be strictly increasing by one, so decrement it here.
2516 self.context.latest_monitor_update_id = monitor_update.update_id;
2517 monitor_update.updates.append(&mut additional_update.updates);
// The preimage update must jump the queue: give it the first blocked update's id
// and shift every blocked update's id up by one.
2519 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2520 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2521 monitor_update.update_id = new_mon_id;
2522 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2523 held_update.update.update_id += 1;
2526 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
// The commitment update itself is queued behind the blocked monitor updates.
2527 let update = self.build_commitment_no_status_check(logger);
2528 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2534 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2535 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2537 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2541 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2542 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2543 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2544 /// before we fail backwards.
2546 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2547 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2548 /// [`ChannelError::Ignore`].
2549 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2550 -> Result<(), ChannelError> where L::Target: Logger {
// Force the failure into the holding cell (`force_holding_cell = true`), so no immediate
// update_fail_htlc message can be produced — hence the assert on `None`.
2551 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2552 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2555 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2556 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2557 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2558 /// before we fail backwards.
2560 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2561 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2562 /// [`ChannelError::Ignore`].
2563 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2564 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2565 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2566 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2568 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2570 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2571 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2572 // these, but for now we just have to treat them as normal.
// Locate the inbound HTLC to fail; usize::MAX is the "not found" sentinel.
2574 let mut pending_idx = core::usize::MAX;
2575 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2576 if htlc.htlc_id == htlc_id_arg {
2578 InboundHTLCState::Committed => {},
2579 InboundHTLCState::LocalRemoved(ref reason) => {
2580 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2582 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2587 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2588 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2594 if pending_idx == core::usize::MAX {
2595 #[cfg(any(test, fuzzing))]
2596 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2597 // is simply a duplicate fail, not previously failed and we failed-back too early.
2598 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
// If we can't send updates right now, escalate to the holding-cell path below.
2602 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2603 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2604 force_holding_cell = true;
2607 // Now update local state:
2608 if force_holding_cell {
// Scan the holding cell for a prior resolution of this HTLC before queueing a new one.
2609 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2610 match pending_update {
2611 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2612 if htlc_id_arg == htlc_id {
2613 #[cfg(any(test, fuzzing))]
2614 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2618 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2619 if htlc_id_arg == htlc_id {
2620 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2621 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2627 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2628 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2629 htlc_id: htlc_id_arg,
// Quiescent path: mark the HTLC as locally removed and emit update_fail_htlc now.
2635 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2637 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2638 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2641 Ok(Some(msgs::UpdateFailHTLC {
2642 channel_id: self.context.channel_id(),
2643 htlc_id: htlc_id_arg,
2648 // Message handlers:
2650 /// Handles a funding_signed message from the remote end.
2651 /// If this call is successful, broadcast the funding transaction (and not before!)
2652 pub fn funding_signed<L: Deref>(
2653 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2654 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
// funding_signed is only ever sent to the channel funder; receiving it inbound is a
// protocol violation.
2658 if !self.context.is_outbound() {
2659 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2661 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2662 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
// Sanity-check that no commitment number has advanced yet — both sides must still be at
// their initial commitment transactions.
2664 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2665 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2666 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2667 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2670 let funding_script = self.context.get_funding_redeemscript();
2672 let counterparty_keys = self.context.build_remote_transaction_keys();
2673 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2674 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2675 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2677 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2678 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
// Rebuild our own initial commitment transaction so we can verify the peer's signature on it.
2680 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2681 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2683 let trusted_tx = initial_commitment_tx.trust();
2684 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2685 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2686 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2687 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2688 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2692 let holder_commitment_tx = HolderCommitmentTransaction::new(
2693 initial_commitment_tx,
2696 &self.context.get_holder_pubkeys().funding_pubkey,
2697 self.context.counterparty_funding_pubkey()
// Give the signer a chance to reject the commitment before we persist anything.
2700 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2701 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
// Construct the ChannelMonitor that will watch the chain for this channel from now on.
2704 let funding_redeemscript = self.context.get_funding_redeemscript();
2705 let funding_txo = self.context.get_funding_txo().unwrap();
2706 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2707 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2708 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2709 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2710 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2711 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2712 shutdown_script, self.context.get_holder_selected_contest_delay(),
2713 &self.context.destination_script, (funding_txo, funding_txo_script),
2714 &self.context.channel_transaction_parameters,
2715 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2717 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2719 channel_monitor.provide_initial_counterparty_commitment_tx(
2720 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2721 self.context.cur_counterparty_commitment_transaction_number,
2722 self.context.counterparty_cur_commitment_point.unwrap(),
2723 counterparty_initial_commitment_tx.feerate_per_kw(),
2724 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2725 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
2727 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail update!
// Batch-funded channels must additionally wait for every channel in the batch before the
// funding tx may be broadcast — tracked via the WaitingForBatch flag.
2728 if self.context.is_batch_funding() {
2729 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2731 self.context.channel_state = ChannelState::FundingSent as u32;
// Both sides' initial commitments are now locked in; advance the counters.
2733 self.context.cur_holder_commitment_transaction_number -= 1;
2734 self.context.cur_counterparty_commitment_transaction_number -= 1;
2736 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2738 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2739 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2743 /// Updates the state of the channel to indicate that all channels in the batch have received
2744 /// funding_signed and persisted their monitors.
2745 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2746 /// treated as a non-batch channel going forward.
2747 pub fn set_batch_ready(&mut self) {
// Dropping the batch marker: this channel no longer needs to wait on its batch siblings.
2748 self.context.is_batch_funding = None;
// Clear only the WaitingForBatch flag so the remaining FundingSent/ChannelReady state
// bits continue to drive the normal channel lifecycle.
2749 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2752 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2753 /// and the channel is now usable (and public), this may generate an announcement_signatures to
// (doc comment appears truncated in this chunk — presumably "...to send to our peer";
// NOTE(review): confirm against the full file.)
2755 pub fn channel_ready<NS: Deref, L: Deref>(
2756 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2757 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2758 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2760 NS::Target: NodeSigner,
// lnd bug 4006 workaround: stash the message and ignore it rather than closing, since
// the peer should have sent channel_reestablish first after a disconnect.
2763 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2764 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2765 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
// Record any new SCID alias the peer offers; it only affects how *they* route to us.
2768 if let Some(scid_alias) = msg.short_channel_id_alias {
2769 if Some(scid_alias) != self.context.short_channel_id {
2770 // The scid alias provided can be used to route payments *from* our counterparty,
2771 // i.e. can be used for inbound payments and provided in invoices, but is not used
2772 // when routing outbound payments.
2773 self.context.latest_inbound_scid_alias = Some(scid_alias);
// Strip shutdown/multi-state flags so the match below sees only the funding-phase bits.
2777 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2779 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2780 // batch, but we can receive channel_ready messages.
// (These two flag tests are the body of a debug assertion whose opening line was
// dropped from this chunk — NOTE(review): confirm against the full file.)
2782 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2783 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
// State transitions: FundingSent -> +TheirChannelReady, or
// FundingSent|OurChannelReady -> ChannelReady (both sides now ready).
2785 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2786 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2787 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2788 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2789 self.context.update_time_counter += 1;
2790 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2791 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2792 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2793 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2795 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2796 // required, or they're sending a fresh SCID alias.
// A re-sent channel_ready must carry the same per-commitment point as the first one;
// which stored/derived point that is depends on how far the commitment number advanced.
2797 let expected_point =
2798 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2799 // If they haven't ever sent an updated point, the point they send should match
2801 self.context.counterparty_cur_commitment_point
2802 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2803 // If we've advanced the commitment number once, the second commitment point is
2804 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2805 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2806 self.context.counterparty_prev_commitment_point
2808 // If they have sent updated points, channel_ready is always supposed to match
2809 // their "first" point, which we re-derive here.
2810 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2811 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2812 ).expect("We already advanced, so previous secret keys should have been validated already")))
2814 if expected_point != Some(msg.next_per_commitment_point) {
2815 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
// Any other state combination means channel_ready is a protocol violation here.
2819 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
// Rotate the counterparty commitment points: their announced "next" point becomes current.
2822 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2823 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2825 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
// May return announcement_signatures if the channel is now announceable.
2827 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
// Handles an inbound update_add_htlc from our peer: validates protocol-level limits
// (amount, HTLC count, in-flight value), dust-exposure limits, and both sides' channel
// reserve / fee requirements, then queues the HTLC as RemoteAnnounced. Checks that only
// affect forwarding safety (not channel validity) downgrade `pending_forward_status` to
// a failure instead of closing the channel.
2830 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2831 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2832 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2833 ) -> Result<(), ChannelError>
2834 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2835 FE::Target: FeeEstimator, L::Target: Logger,
2837 // We can't accept HTLCs sent after we've sent a shutdown.
2838 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2839 if local_sent_shutdown {
// 0x4000|8 is the BOLT 4 permanent_channel_failure-style failure code used here
// to fail the HTLC back rather than close the channel.
2840 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2842 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2843 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2844 if remote_sent_shutdown {
2845 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2847 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2848 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
// Basic amount sanity: cannot exceed channel value, cannot be zero, must meet our minimum.
2850 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2851 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2853 if msg.amount_msat == 0 {
2854 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2856 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2857 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce our advertised max-accepted-HTLCs and max in-flight value limits.
2860 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2861 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2862 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2863 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)))
;
2865 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2866 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2869 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2870 // the reserve_satoshis we told them to always have as direct payment so that they lose
2871 // something if we punish them for broadcasting an old state).
2872 // Note that we don't really care about having a small/no to_remote output in our local
2873 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2874 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2875 // present in the next commitment transaction we send them (at least for fulfilled ones,
2876 // failed ones won't modify value_to_self).
2877 // Note that we will send HTLCs which another instance of rust-lightning would think
2878 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2879 // Channel state once they will not be present in the next received commitment
// Sum outbound HTLCs that were successfully fulfilled and are awaiting revocation —
// their value will return to us, so they don't count against the reserve.
2881 let mut removed_outbound_total_msat = 0;
2882 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2883 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2884 removed_outbound_total_msat += htlc.amount_msat;
2885 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2886 removed_outbound_total_msat += htlc.amount_msat;
// Dust-exposure checks: an HTLC below the dust limit (plus second-stage tx fee, for
// non-anchor channels) becomes unclaimable-on-chain value; cap our total exposure.
2890 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2891 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
// (the anchor-channel arm of this if/else was dropped from this chunk —
// NOTE(review): confirm against the full file; for anchors the limits are (0, 0).)
2894 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2895 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2896 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2898 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2899 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2900 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2901 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2902 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2903 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
// 0x1000|7: temporary_channel_failure — fail the HTLC back, don't close the channel.
2904 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2908 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2909 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2910 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2911 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2912 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2913 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2914 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Compute the remote balance after all pending HTLCs; the new HTLC must fit within it.
2918 let pending_value_to_self_msat =
2919 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2920 let pending_remote_value_msat =
2921 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2922 if pending_remote_value_msat < msg.amount_msat {
2923 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2926 // Check that the remote can afford to pay for this HTLC on-chain at the current
2927 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
// Only the funder pays commitment fees, so this is 0 when we're the funder.
2929 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2930 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2931 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
// Anchor channels carry two fixed-value anchor outputs the funder must also cover.
2933 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2934 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2938 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2939 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2941 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2942 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
// Recompute anchor value without the !is_outbound() condition for the checks below.
2946 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2947 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2951 if !self.context.is_outbound() {
2952 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
2953 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
2954 // side, only on the sender's. Note that with anchor outputs we are no longer as
2955 // sensitive to fee spikes, so we need to account for them.
2956 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2957 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2958 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2959 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2961 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
2962 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2963 // the HTLC, i.e. its status is already set to failing.
2964 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
2965 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2968 // Check that they won't violate our local required channel reserve by adding this HTLC.
2969 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2970 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2971 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
2972 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC IDs must be strictly sequential per BOLT 2.
2975 if self.context.next_counterparty_htlc_id != msg.htlc_id {
2976 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
// 500_000_000 is the nLockTime block-height/timestamp threshold: values above it are
// unix timestamps, which are invalid as CLTV expiries.
2978 if msg.cltv_expiry >= 500000000 {
2979 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
2982 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
2983 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
2984 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
2988 // Now update local state:
2989 self.context.next_counterparty_htlc_id += 1;
2990 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
2991 htlc_id: msg.htlc_id,
2992 amount_msat: msg.amount_msat,
2993 payment_hash: msg.payment_hash,
2994 cltv_expiry: msg.cltv_expiry,
// RemoteAnnounced: announced by the peer but not yet committed on either side.
2995 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3000 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
// (doc comment appears truncated in this chunk — presumably continues "...for" —
// NOTE(review): confirm against the full file.)
// Looks up the outbound HTLC by id, validates the preimage (for fulfills) against the
// stored payment hash, and transitions Committed -> RemoteRemoved. Fulfill/fail of an
// uncommitted or already-removed HTLC is a protocol violation and closes the channel.
3002 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3003 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage")
;
3004 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3005 if htlc.htlc_id == htlc_id {
3006 let outcome = match check_preimage {
3007 None => fail_reason.into(),
3008 Some(payment_preimage) => {
// A valid preimage must SHA256-hash to the HTLC's payment hash.
3009 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
3010 if payment_hash != htlc.payment_hash {
3011 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3013 OutboundHTLCOutcome::Success(Some(payment_preimage))
3017 OutboundHTLCState::LocalAnnounced(_) =>
3018 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3019 OutboundHTLCState::Committed => {
3020 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3022 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3023 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
// No HTLC with the given id exists — also a protocol violation.
3028 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
// Handles an inbound update_fulfill_htlc: after state checks, marks the HTLC removed
// with the claimed preimage and returns the HTLC's source and amount for upstream
// claim propagation.
3031 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3032 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3033 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3035 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3036 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
// Preimage is validated against the payment hash inside mark_outbound_htlc_removed.
3039 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
// Handles an inbound update_fail_htlc: after state checks, records the failure reason
// on the matching outbound HTLC (no preimage is involved).
3042 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3043 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3044 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3046 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3047 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3050 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
// Handles an inbound update_fail_malformed_htlc: identical state checks to
// update_fail_htlc; the caller supplies the HTLCFailReason built from the message.
3054 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3055 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3056 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3058 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3059 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3062 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
// Handles an inbound commitment_signed: verifies the counterparty's signature on our
// next holder commitment transaction and on every non-dust HTLC transaction, validates
// any pending remote fee update, then advances local HTLC/fee state and emits a
// ChannelMonitorUpdate carrying the newly signed commitment. All fallible checks happen
// before any state is mutated.
3066 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3067 where L::Target: Logger
3069 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3070 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3072 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3073 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3075 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3076 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
// Build our next holder commitment tx and verify the peer's signature over its sighash
// against their funding pubkey.
3079 let funding_script = self.context.get_funding_redeemscript();
3081 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3083 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3084 let commitment_txid = {
3085 let trusted_tx = commitment_stats.tx.trust();
3086 let bitcoin_tx = trusted_tx.built_transaction();
3087 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3089 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3090 log_bytes!(msg.signature.serialize_compact()[..]),
3091 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3092 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3093 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3094 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3098 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3100 // If our counterparty updated the channel fee in this commitment transaction, check that
3101 // they can actually afford the new fee now.
3102 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3103 update_state == FeeUpdateState::RemoteAnnounced
// Only the funder (here: the remote, since we check !is_outbound) pays the fee, and
// must still satisfy the reserve we required of them.
3106 debug_assert!(!self.context.is_outbound());
3107 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3108 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3109 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
// Test/fuzzing-only consistency check: the cached projected fee must match what we
// actually computed for this commitment.
3112 #[cfg(any(test, fuzzing))]
3114 if self.context.is_outbound() {
3115 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3116 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3117 if let Some(info) = projected_commit_tx_info {
3118 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3119 + self.context.holding_cell_htlc_updates.len();
3120 if info.total_pending_htlcs == total_pending_htlcs
3121 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3122 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3123 && info.feerate == self.context.feerate_per_kw {
3124 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
// The peer must provide exactly one signature per non-dust HTLC.
3130 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3131 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3134 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3135 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3136 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3137 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3138 // backwards compatibility, we never use it in production. To provide test coverage, here,
3139 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3140 #[allow(unused_assignments, unused_mut)]
3141 let mut separate_nondust_htlc_sources = false;
3142 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3143 use core::hash::{BuildHasher, Hasher};
3144 // Get a random value using the only std API to do so - the DefaultHasher
3145 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3146 separate_nondust_htlc_sources = rand_val % 2 == 0;
// Verify each non-dust HTLC's second-stage transaction signature; dust HTLCs (no
// transaction_output_index) get no signature and carry None.
3149 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3150 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3151 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3152 if let Some(_) = htlc.transaction_output_index {
3153 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3154 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3155 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3157 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
// Anchor channels use SIGHASH_SINGLE|ANYONECANPAY on HTLC txs so fees can be attached.
3158 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3159 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3160 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3161 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
3162 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3163 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
3164 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3166 if !separate_nondust_htlc_sources {
3167 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3170 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3172 if separate_nondust_htlc_sources {
3173 if let Some(source) = source_opt.take() {
3174 nondust_htlc_sources.push(source);
3177 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3180 let holder_commitment_tx = HolderCommitmentTransaction::new(
3181 commitment_stats.tx,
3183 msg.htlc_signatures.clone(),
3184 &self.context.get_holder_pubkeys().funding_pubkey,
3185 self.context.counterparty_funding_pubkey()
// Give the signer a chance to reject the commitment before we commit to it.
3188 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3189 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3191 // Update state now that we've passed all the can-fail calls...
3192 let mut need_commitment = false;
3193 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3194 if *update_state == FeeUpdateState::RemoteAnnounced {
3195 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3196 need_commitment = true;
// Inbound HTLCs the peer announced are now committed: RemoteAnnounced ->
// AwaitingRemoteRevokeToAnnounce.
3200 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3201 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3202 Some(forward_info.clone())
3204 if let Some(forward_info) = new_forward {
3205 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3206 &htlc.payment_hash, &self.context.channel_id);
3207 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3208 need_commitment = true;
// Outbound HTLCs the peer removed advance to AwaitingRemoteRevokeToRemove; preimages
// of fulfilled ones are collected for the monitor update.
3211 let mut claimed_htlcs = Vec::new();
3212 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3213 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3214 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3215 &htlc.payment_hash, &self.context.channel_id);
3216 // Grab the preimage, if it exists, instead of cloning
3217 let mut reason = OutboundHTLCOutcome::Success(None);
3218 mem::swap(outcome, &mut reason);
3219 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3220 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3221 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3222 // have a `Success(None)` reason. In this case we could forget some HTLC
3223 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3224 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3226 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3228 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3229 need_commitment = true;
// Persist the newly signed holder commitment via a monitor update.
3233 self.context.latest_monitor_update_id += 1;
3234 let mut monitor_update = ChannelMonitorUpdate {
3235 update_id: self.context.latest_monitor_update_id,
3236 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3237 commitment_tx: holder_commitment_tx,
3238 htlc_outputs: htlcs_and_sigs,
3240 nondust_htlc_sources,
3244 self.context.cur_holder_commitment_transaction_number -= 1;
3245 self.context.expecting_peer_commitment_signed = false;
3246 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3247 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3248 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3250 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3251 // In case we initially failed monitor updating without requiring a response, we need
3252 // to make sure the RAA gets sent first.
3253 self.context.monitor_pending_revoke_and_ack = true;
3254 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3255 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3256 // the corresponding HTLC status updates so that
3257 // get_last_commitment_update_for_send includes the right HTLCs.
3258 self.context.monitor_pending_commitment_signed = true;
3259 let mut additional_update = self.build_commitment_no_status_check(logger);
3260 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3261 // strictly increasing by one, so decrement it here.
3262 self.context.latest_monitor_update_id = monitor_update.update_id;
3263 monitor_update.updates.append(&mut additional_update.updates);
3265 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3266 &self.context.channel_id);
3267 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3270 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3271 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3272 // we'll send one right away when we get the revoke_and_ack when we
3273 // free_holding_cell_htlcs().
3274 let mut additional_update = self.build_commitment_no_status_check(logger);
3275 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3276 // strictly increasing by one, so decrement it here.
3277 self.context.latest_monitor_update_id = monitor_update.update_id;
3278 monitor_update.updates.append(&mut additional_update.updates);
3282 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3283 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3284 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3285 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3288 /// Public version of the below, checking relevant preconditions first.
3289 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3290 /// returns `(None, Vec::new())`.
3291 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3292 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3293 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3294 where F::Target: FeeEstimator, L::Target: Logger
// Only attempt to free the holding cell once the channel is (at least) ChannelReady and we
// are not mid-exchange: if we are still awaiting the peer's revoke_and_ack, the peer is
// disconnected, or a monitor update is in flight, we cannot send new commitment updates now.
3296 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3297 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3298 self.free_holding_cell_htlcs(fee_estimator, logger)
3299 } else { (None, Vec::new()) }
3302 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3303 /// for our counterparty.
///
/// Returns the `ChannelMonitorUpdate` covering the freed updates (if any were generated) and
/// the list of held add-HTLCs which could not be re-sent and must be failed backwards instead.
/// Must not be called while a monitor update is in progress (asserted below).
3304 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3305 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3306 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3307 where F::Target: FeeEstimator, L::Target: Logger
3309 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3310 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3311 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3312 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
// Pre-build the monitor update shell; sub-calls below may each bump
// latest_monitor_update_id, so the id is reconciled once at the end (see line 3405).
3314 let mut monitor_update = ChannelMonitorUpdate {
3315 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3316 updates: Vec::new(),
// Take ownership of the queued updates so we can call &mut self methods while draining.
3319 let mut htlc_updates = Vec::new();
3320 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3321 let mut update_add_count = 0;
3322 let mut update_fulfill_count = 0;
3323 let mut update_fail_count = 0;
3324 let mut htlcs_to_fail = Vec::new();
3325 for htlc_update in htlc_updates.drain(..) {
3326 // Note that this *can* fail, though it should be due to rather-rare conditions on
3327 // fee races with adding too many outputs which push our total payments just over
3328 // the limit. In case it's less rare than I anticipate, we may want to revisit
3329 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3330 // to rebalance channels.
3331 match &htlc_update {
3332 &HTLCUpdateAwaitingACK::AddHTLC {
3333 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3334 skimmed_fee_msat, ..
3336 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3337 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3339 Ok(_) => update_add_count += 1,
3342 ChannelError::Ignore(ref msg) => {
3343 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3344 // If we fail to send here, then this HTLC should
3345 // be failed backwards. Failing to send here
3346 // indicates that this HTLC may keep being put back
3347 // into the holding cell without ever being
3348 // successfully forwarded/failed/fulfilled, causing
3349 // our counterparty to eventually close on us.
3350 htlcs_to_fail.push((source.clone(), *payment_hash));
3353 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3359 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3360 // If an HTLC claim was previously added to the holding cell (via
3361 // `get_update_fulfill_htlc`, then generating the claim message itself must
3362 // not fail - any in between attempts to claim the HTLC will have resulted
3363 // in it hitting the holding cell again and we cannot change the state of a
3364 // holding cell HTLC from fulfill to anything else.
3365 let mut additional_monitor_update =
3366 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3367 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3368 { monitor_update } else { unreachable!() };
3369 update_fulfill_count += 1;
// Fold the fulfill's monitor steps into the single update we return.
3370 monitor_update.updates.append(&mut additional_monitor_update.updates);
3372 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3373 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3374 Ok(update_fail_msg_option) => {
3375 // If an HTLC failure was previously added to the holding cell (via
3376 // `queue_fail_htlc`) then generating the fail message itself must
3377 // not fail - we should never end up in a state where we double-fail
3378 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3379 // for a full revocation before failing.
3380 debug_assert!(update_fail_msg_option.is_some());
3381 update_fail_count += 1;
3384 if let ChannelError::Ignore(_) = e {}
3386 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
// If nothing was actually freed (every add failed back) and there is no queued fee
// update, there is no commitment to build - hand back only the HTLCs to fail.
3393 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3394 return (None, htlcs_to_fail);
3396 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3397 self.send_update_fee(feerate, false, fee_estimator, logger)
3402 let mut additional_update = self.build_commitment_no_status_check(logger);
3403 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3404 // but we want them to be strictly increasing by one, so reset it here.
3405 self.context.latest_monitor_update_id = monitor_update.update_id;
3406 monitor_update.updates.append(&mut additional_update.updates);
3408 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3409 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3410 update_add_count, update_fulfill_count, update_fail_count);
// Pause until the monitor update persists; we will (re-)send the commitment_signed then.
3412 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3413 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3419 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3420 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3421 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3422 /// generating an appropriate error *after* the channel state has been updated based on the
3423 /// revoke_and_ack message.
///
/// Returns the HTLCs which must be failed backwards and, unless `hold_mon_update` is set or
/// other monitor updates are already blocked, the `ChannelMonitorUpdate` recording the
/// revealed per-commitment secret (otherwise the update is queued in
/// `blocked_monitor_updates`).
3424 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3425 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3426 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3427 where F::Target: FeeEstimator, L::Target: Logger,
// State sanity checks: channel must be operational, the peer connected (no pending
// reestablish), and we must not already be exchanging closing_signed.
3429 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3430 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3432 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3433 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3435 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3436 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
// Parse the revealed per-commitment secret and, when we know the counterparty's previous
// commitment point, verify the secret actually derives that point.
3439 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3441 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3442 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3443 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3447 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3448 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3449 // haven't given them a new commitment transaction to broadcast). We should probably
3450 // take advantage of this by updating our channel monitor, sending them an error, and
3451 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3452 // lot of work, and there's some chance this is all a misunderstanding anyway.
3453 // We have to do *something*, though, since our signer may get mad at us for otherwise
3454 // jumping a remote commitment number, so best to just force-close and move on.
3455 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
// Test/fuzzing builds cache next-commitment fee info; it is now stale, so clear it.
3458 #[cfg(any(test, fuzzing))]
3460 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3461 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
// Give the signer a chance to reject the revocation before we commit to it.
3464 match &self.context.holder_signer {
3465 ChannelSignerType::Ecdsa(ecdsa) => {
3466 ecdsa.validate_counterparty_revocation(
3467 self.context.cur_counterparty_commitment_transaction_number + 1,
3469 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
// Record the secret (verifying consistency against previously-revealed secrets) and queue
// a monitor update step persisting it.
3473 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3474 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3475 self.context.latest_monitor_update_id += 1;
3476 let mut monitor_update = ChannelMonitorUpdate {
3477 update_id: self.context.latest_monitor_update_id,
3478 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3479 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3480 secret: msg.per_commitment_secret,
3484 // Update state now that we've passed all the can-fail calls...
3485 // (note that we may still fail to generate the new commitment_signed message, but that's
3486 // OK, we step the channel here and *then* if the new generation fails we can fail the
3487 // channel based on that, but stepping stuff here should be safe either way.
3488 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3489 self.context.sent_message_awaiting_response = None;
// Advance the counterparty commitment point/number now that the old state is revoked.
3490 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3491 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3492 self.context.cur_counterparty_commitment_transaction_number -= 1;
3494 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3495 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3498 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3499 let mut to_forward_infos = Vec::new();
3500 let mut revoked_htlcs = Vec::new();
3501 let mut finalized_claimed_htlcs = Vec::new();
3502 let mut update_fail_htlcs = Vec::new();
3503 let mut update_fail_malformed_htlcs = Vec::new();
3504 let mut require_commitment = false;
3505 let mut value_to_self_msat_diff: i64 = 0;
3508 // Take references explicitly so that we can hold multiple references to self.context.
3509 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3510 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3511 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3513 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
// First pass: drop inbound HTLCs we had locally removed, now that the removal is fully
// revoked; a Fulfill removal moves the HTLC's value to our side.
3514 pending_inbound_htlcs.retain(|htlc| {
3515 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3516 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3517 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3518 value_to_self_msat_diff += htlc.amount_msat as i64;
3520 *expecting_peer_commitment_signed = true;
// Drop outbound HTLCs whose removal the peer has now fully revoked: failures are handed
// back upstream, fulfills are finalized and their value paid out to the peer.
3524 pending_outbound_htlcs.retain(|htlc| {
3525 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3526 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3527 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3528 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3530 finalized_claimed_htlcs.push(htlc.source.clone());
3531 // They fulfilled, so we sent them money
3532 value_to_self_msat_diff -= htlc.amount_msat as i64;
// Second pass: step each remaining inbound HTLC forward one state now that the RAA arrived.
3537 for htlc in pending_inbound_htlcs.iter_mut() {
3538 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3540 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
// Temporarily swap the state out so we can move the owned forward_info.
3544 let mut state = InboundHTLCState::Committed;
3545 mem::swap(&mut state, &mut htlc.state);
3547 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3548 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3549 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3550 require_commitment = true;
3551 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3552 match forward_info {
3553 PendingHTLCStatus::Fail(fail_msg) => {
3554 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3555 require_commitment = true;
3557 HTLCFailureMsg::Relay(msg) => {
3558 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3559 update_fail_htlcs.push(msg)
3561 HTLCFailureMsg::Malformed(msg) => {
3562 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3563 update_fail_malformed_htlcs.push(msg)
3567 PendingHTLCStatus::Forward(forward_info) => {
3568 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3569 to_forward_infos.push((forward_info, htlc.htlc_id));
3570 htlc.state = InboundHTLCState::Committed;
// Step outbound HTLCs forward one state as well.
3576 for htlc in pending_outbound_htlcs.iter_mut() {
3577 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3578 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3579 htlc.state = OutboundHTLCState::Committed;
3580 *expecting_peer_commitment_signed = true;
3582 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3583 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3584 // Grab the preimage, if it exists, instead of cloning
3585 let mut reason = OutboundHTLCOutcome::Success(None);
3586 mem::swap(outcome, &mut reason);
3587 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3588 require_commitment = true;
// Apply the net balance change from all removals above.
3592 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
// Promote any fee update that was waiting on this revocation.
3594 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3595 match update_state {
3596 FeeUpdateState::Outbound => {
3597 debug_assert!(self.context.is_outbound());
3598 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3599 self.context.feerate_per_kw = feerate;
3600 self.context.pending_update_fee = None;
3601 self.context.expecting_peer_commitment_signed = true;
3603 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3604 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3605 debug_assert!(!self.context.is_outbound());
3606 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3607 require_commitment = true;
3608 self.context.feerate_per_kw = feerate;
3609 self.context.pending_update_fee = None;
// Decide whether the monitor update is returned to the caller or queued as blocked:
// it stays blocked while other updates are blocked or the caller asked us to hold it.
3614 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3615 let release_state_str =
3616 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3617 macro_rules! return_with_htlcs_to_fail {
3618 ($htlcs_to_fail: expr) => {
3619 if !release_monitor {
3620 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3621 update: monitor_update,
3623 return Ok(($htlcs_to_fail, None));
3625 return Ok(($htlcs_to_fail, Some(monitor_update)));
3630 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3631 // We can't actually generate a new commitment transaction (incl by freeing holding
3632 // cells) while we can't update the monitor, so we just return what we have.
3633 if require_commitment {
3634 self.context.monitor_pending_commitment_signed = true;
3635 // When the monitor updating is restored we'll call
3636 // get_last_commitment_update_for_send(), which does not update state, but we're
3637 // definitely now awaiting a remote revoke before we can step forward any more, so
3639 let mut additional_update = self.build_commitment_no_status_check(logger);
3640 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3641 // strictly increasing by one, so decrement it here.
3642 self.context.latest_monitor_update_id = monitor_update.update_id;
3643 monitor_update.updates.append(&mut additional_update.updates);
// Stash the forwards/fails/finalized-fulfills until the monitor update completes.
3645 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3646 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3647 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3648 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3649 return_with_htlcs_to_fail!(Vec::new());
// Try to free the holding cell now that the peer has revoked - this may itself produce a
// new commitment and additional monitor update steps.
3652 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3653 (Some(mut additional_update), htlcs_to_fail) => {
3654 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3655 // strictly increasing by one, so decrement it here.
3656 self.context.latest_monitor_update_id = monitor_update.update_id;
3657 monitor_update.updates.append(&mut additional_update.updates);
3659 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3660 &self.context.channel_id(), release_state_str);
3662 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3663 return_with_htlcs_to_fail!(htlcs_to_fail);
3665 (None, htlcs_to_fail) => {
3666 if require_commitment {
3667 let mut additional_update = self.build_commitment_no_status_check(logger);
3669 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3670 // strictly increasing by one, so decrement it here.
3671 self.context.latest_monitor_update_id = monitor_update.update_id;
3672 monitor_update.updates.append(&mut additional_update.updates);
3674 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3675 &self.context.channel_id(),
3676 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3679 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3680 return_with_htlcs_to_fail!(htlcs_to_fail);
3682 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3683 &self.context.channel_id(), release_state_str);
3685 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3686 return_with_htlcs_to_fail!(htlcs_to_fail);
3692 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3693 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3694 /// commitment update.
3695 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3696 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3697 where F::Target: FeeEstimator, L::Target: Logger
// We force the update into the holding cell (second argument `true`), so send_update_fee
// must never hand back a message to send immediately.
3699 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3700 assert!(msg_opt.is_none(), "We forced holding cell?");
3703 /// Adds a pending update to this channel. See the doc for send_htlc for
3704 /// further details on the optionness of the return value.
3705 /// If our balance is too low to cover the cost of the next commitment transaction at the
3706 /// new feerate, the update is cancelled.
///
/// Returns `None` when the update was cancelled or placed in the holding cell (either
/// because `force_holding_cell` was set or because we are awaiting a remote revoke or a
/// monitor update); otherwise returns the `update_fee` message to send now.
3708 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3709 /// [`Channel`] if `force_holding_cell` is false.
3710 fn send_update_fee<F: Deref, L: Deref>(
3711 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3712 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3713 ) -> Option<msgs::UpdateFee>
3714 where F::Target: FeeEstimator, L::Target: Logger
// Only the funder (outbound side) may send update_fee, and only on a live, usable channel.
3716 if !self.context.is_outbound() {
3717 panic!("Cannot send fee from inbound channel");
3719 if !self.context.is_usable() {
3720 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3722 if !self.context.is_live() {
3723 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3726 // Before proposing a feerate update, check that we can actually afford the new fee.
3727 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3728 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3729 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3730 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// Budget for the new fee over the current non-dust HTLCs, any held adds, plus a buffer of
// CONCURRENT_INBOUND_HTLC_FEE_BUFFER further HTLCs, all at the proposed feerate (in msat).
3731 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3732 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
// Our balance after the buffer fee must still clear the counterparty-selected reserve.
3733 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3734 //TODO: auto-close after a number of failures?
3735 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3739 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3740 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3741 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3742 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3743 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3744 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3747 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3748 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// If we are mid-exchange (awaiting the peer's revoke or a monitor persist) we cannot send
// the update now - stash it in the holding cell instead.
3752 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3753 force_holding_cell = true;
3756 if force_holding_cell {
3757 self.context.holding_cell_update_fee = Some(feerate_per_kw);
// Record the outbound fee update as pending and hand the message back for sending.
3761 debug_assert!(self.context.pending_update_fee.is_none());
3762 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3764 Some(msgs::UpdateFee {
3765 channel_id: self.context.channel_id,
3770 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3771 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3773 /// No further message handling calls may be made until a channel_reestablish dance has
3775 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3776 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3777 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3778 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3782 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3783 // While the below code should be idempotent, it's simpler to just return early, as
3784 // redundant disconnect events can fire, though they should be rare.
// Roll back announcement_signatures we sent but which the peer never acked - they must be
// re-sent after reconnection.
3788 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3789 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3792 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3793 // will be retransmitted.
3794 self.context.last_sent_closing_fee = None;
3795 self.context.pending_counterparty_closing_signed = None;
3796 self.context.closing_fee_limits = None;
3798 let mut inbound_drop_count = 0;
3799 self.context.pending_inbound_htlcs.retain(|htlc| {
3801 InboundHTLCState::RemoteAnnounced(_) => {
3802 // They sent us an update_add_htlc but we never got the commitment_signed.
3803 // We'll tell them what commitment_signed we're expecting next and they'll drop
3804 // this HTLC accordingly
3805 inbound_drop_count += 1;
3808 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3809 // We received a commitment_signed updating this HTLC and (at least hopefully)
3810 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3811 // in response to it yet, so don't touch it.
3814 InboundHTLCState::Committed => true,
3815 InboundHTLCState::LocalRemoved(_) => {
3816 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3817 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3818 // (that we missed). Keep this around for now and if they tell us they missed
3819 // the commitment_signed we can re-transmit the update then.
// The dropped inbound HTLCs consumed counterparty HTLC ids; roll the counter back so the
// peer's re-sent HTLCs line up with the ids we expect.
3824 self.context.next_counterparty_htlc_id -= inbound_drop_count;
// Forget a fee update the peer announced but never committed; they can re-send it.
3826 if let Some((_, update_state)) = self.context.pending_update_fee {
3827 if update_state == FeeUpdateState::RemoteAnnounced {
3828 debug_assert!(!self.context.is_outbound());
3829 self.context.pending_update_fee = None;
3833 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3834 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3835 // They sent us an update to remove this but haven't yet sent the corresponding
3836 // commitment_signed, we need to move it back to Committed and they can re-send
3837 // the update upon reconnection.
3838 htlc.state = OutboundHTLCState::Committed;
3842 self.context.sent_message_awaiting_response = None;
// Mark the channel paused until a channel_reestablish completes.
3844 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3845 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3849 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3850 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3851 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3852 /// update completes (potentially immediately).
3853 /// The messages which were generated with the monitor update must *not* have been sent to the
3854 /// remote end, and must instead have been dropped. They will be regenerated when
3855 /// [`Self::monitor_updating_restored`] is called.
///
/// The three `resend_*` flags record which messages must be regenerated on restore; the
/// three vectors are drained into the corresponding `monitor_pending_*` queues to be
/// processed once the monitor update has been persisted.
3857 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3858 /// [`chain::Watch`]: crate::chain::Watch
3859 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3860 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3861 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3862 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3863 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// OR the flags in (never clear) so multiple pauses accumulate their resend requirements.
3865 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3866 self.context.monitor_pending_commitment_signed |= resend_commitment;
3867 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3868 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3869 self.context.monitor_pending_failures.append(&mut pending_fails);
3870 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3871 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3874 /// Indicates that the latest ChannelMonitor update has been committed by the client
3875 /// successfully and we should restore normal operation. Returns messages which should be sent
3876 /// to the remote side.
3877 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3878 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3879 user_config: &UserConfig, best_block_height: u32
3880 ) -> MonitorRestoreUpdates
3883 NS::Target: NodeSigner
3885 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3886 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3888 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3889 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3890 // first received the funding_signed.
3891 let mut funding_broadcastable =
3892 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3893 self.context.funding_transaction.take()
3895 // That said, if the funding transaction is already confirmed (ie we're active with a
3896 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3897 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3898 funding_broadcastable = None;
3901 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3902 // (and we assume the user never directly broadcasts the funding transaction and waits for
3903 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3904 // * an inbound channel that failed to persist the monitor on funding_created and we got
3905 // the funding transaction confirmed before the monitor was persisted, or
3906 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3907 let channel_ready = if self.context.monitor_pending_channel_ready {
3908 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3909 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3910 self.context.monitor_pending_channel_ready = false;
3911 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3912 Some(msgs::ChannelReady {
3913 channel_id: self.context.channel_id(),
3914 next_per_commitment_point,
3915 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3919 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
3921 let mut accepted_htlcs = Vec::new();
3922 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3923 let mut failed_htlcs = Vec::new();
3924 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3925 let mut finalized_claimed_htlcs = Vec::new();
3926 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
3928 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3929 self.context.monitor_pending_revoke_and_ack = false;
3930 self.context.monitor_pending_commitment_signed = false;
3931 return MonitorRestoreUpdates {
3932 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3933 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3937 let raa = if self.context.monitor_pending_revoke_and_ack {
3938 Some(self.get_last_revoke_and_ack())
3940 let commitment_update = if self.context.monitor_pending_commitment_signed {
3941 self.get_last_commitment_update_for_send(logger).ok()
3943 if commitment_update.is_some() {
3944 self.mark_awaiting_response();
3947 self.context.monitor_pending_revoke_and_ack = false;
3948 self.context.monitor_pending_commitment_signed = false;
3949 let order = self.context.resend_order.clone();
3950 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
3951 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
3952 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
3953 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
3954 MonitorRestoreUpdates {
3955 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
/// Handles an inbound `update_fee` message.
///
/// Only the funder may change the channel feerate, so this errors (closing the channel) if
/// we are ourselves outbound, or if the peer should first be mid-`channel_reestablish`.
/// The proposed feerate is validated via `check_remote_fee` and then stored as a pending
/// update in the `RemoteAnnounced` state until it is committed. If the new feerate exceeds
/// our dust-buffer feerate, we additionally verify it does not push our dust exposure (on
/// either side's commitment transaction) past our configured limit.
pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	// If we're the outbound (funding) side, only we may send update_fee, not the peer.
	if self.context.is_outbound() {
		return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
	// No channel-updating messages are valid until the peer completes channel_reestablish.
	if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
		return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
	// Sanity-check the proposed feerate against our fee estimator before accepting it.
	Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
	// Evaluate the dust-buffer comparison *before* recording the pending update, as
	// `get_dust_buffer_feerate` takes `pending_update_fee` into account.
	let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
	self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
	self.context.update_time_counter += 1;
	// If the feerate has increased over the previous dust buffer (note that
	// `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
	// won't be pushed over our dust exposure limit by the feerate increase.
	if feerate_over_dust_buffer {
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		// Total exposure is the sum over pending HTLCs in both directions.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
				msg.feerate_per_kw, holder_tx_dust_exposure)));
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
				msg.feerate_per_kw, counterparty_tx_dust_exposure)));
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked waiting on them: any message whose generation previously stalled on the signer
/// (commitment update, funding_signed/funding_created, channel_ready) is regenerated here
/// and returned for immediate sending.
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
	// Retry the commitment update if one was previously pending on the signer.
	let commitment_update = if self.context.signer_pending_commitment_update {
		self.get_last_commitment_update_for_send(logger).ok()
	// funding_signed is only produced by the non-outbound (accepting) side.
	let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
		self.context.get_funding_signed_msg(logger).1
	// Having just produced funding_signed, we may also now be able to send channel_ready.
	let channel_ready = if funding_signed.is_some() {
		self.check_get_channel_ready(0)
	// funding_created is only produced by the outbound (funding) side.
	let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
		self.context.get_funding_created_msg(logger)
	log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
		if commitment_update.is_some() { "a" } else { "no" },
		if funding_signed.is_some() { "a" } else { "no" },
		if funding_created.is_some() { "a" } else { "no" },
		if channel_ready.is_some() { "a" } else { "no" });
	SignerResumeUpdates {
/// Builds the `revoke_and_ack` message to (re-)send to our counterparty, containing the
/// released per-commitment secret for a prior commitment and the per-commitment point at
/// our current holder commitment number.
fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
	let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
	// Commitment numbers count down (see the INITIAL_COMMITMENT_NUMBER arithmetic elsewhere
	// in this impl), so "+ 2" refers to an older, already-revoked commitment.
	let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
	msgs::RevokeAndACK {
		channel_id: self.context.channel_id,
		per_commitment_secret,
		next_per_commitment_point,
		next_local_nonce: None,
/// Gets the last commitment update for immediate sending to our peer.
///
/// Rebuilds the full set of un-revoked update messages (`update_add_htlc`,
/// `update_fulfill_htlc`, `update_fail_htlc`, `update_fail_malformed_htlc`, and
/// `update_fee` if we're the funder) from our pending HTLC state, then attempts to
/// regenerate the matching `commitment_signed`. Returns `Err(())` and sets
/// `signer_pending_commitment_update` if the signer is not yet able to sign.
fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
	let mut update_add_htlcs = Vec::new();
	let mut update_fulfill_htlcs = Vec::new();
	let mut update_fail_htlcs = Vec::new();
	let mut update_fail_malformed_htlcs = Vec::new();
	// Re-send every outbound HTLC addition the peer has not yet committed to.
	for htlc in self.context.pending_outbound_htlcs.iter() {
		if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
			update_add_htlcs.push(msgs::UpdateAddHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc.htlc_id,
				amount_msat: htlc.amount_msat,
				payment_hash: htlc.payment_hash,
				cltv_expiry: htlc.cltv_expiry,
				onion_routing_packet: (**onion_packet).clone(),
				skimmed_fee_msat: htlc.skimmed_fee_msat,
	// Re-send every removal (fail / fail-malformed / fulfill) of an inbound HTLC that we
	// announced but the peer has not yet committed to.
	for htlc in self.context.pending_inbound_htlcs.iter() {
		if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
			&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
				update_fail_htlcs.push(msgs::UpdateFailHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					reason: err_packet.clone()
			&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
				update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					sha256_of_onion: sha256_of_onion.clone(),
					failure_code: failure_code.clone(),
			&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
				update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					payment_preimage: payment_preimage.clone(),
	// Only the funder (outbound side) ever sends update_fee.
	let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id(),
			feerate_per_kw: self.context.pending_update_fee.unwrap().0,
	log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
		&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
		update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
	// Regenerate commitment_signed, tracking whether the signer is (still) blocking us so
	// signer_maybe_unblocked can retry later.
	let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
		if self.context.signer_pending_commitment_update {
			log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
			self.context.signer_pending_commitment_update = false;
		if !self.context.signer_pending_commitment_update {
			log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
			self.context.signer_pending_commitment_update = true;
	Ok(msgs::CommitmentUpdate {
		update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
///
/// Returns `Some` only once we have previously sent `shutdown` (i.e. `LocalShutdownSent`
/// is set), in which case our shutdown script must already have been chosen.
pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
	if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
		// LocalShutdownSent is only ever set after we committed to a shutdown script.
		assert!(self.context.shutdown_scriptpubkey.is_some());
		Some(msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
/// Handles an inbound `channel_reestablish`, returning the set of messages we need to
/// re-send (`channel_ready`, `revoke_and_ack`, a commitment update, our `shutdown`, and
/// announcement signatures) to bring both sides back in sync after a reconnection.
///
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
///
/// Some links printed in log lines are included here to check them during build (when run with
/// `cargo doc --document-private-items`):
/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
pub fn channel_reestablish<L: Deref, NS: Deref>(
	&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
	chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
) -> Result<ReestablishResponses, ChannelError>
	NS::Target: NodeSigner
	// channel_reestablish is only valid as the first message after a reconnection.
	if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
		// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
		// almost certainly indicates we are going to end up out-of-sync in some way, so we
		// just close here instead of trying to recover.
		return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
	// Reject clearly-bogus commitment numbers up front.
	if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
		msg.next_local_commitment_number == 0 {
		return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
	if msg.next_remote_commitment_number > 0 {
		// Verify the per-commitment secret the peer claims we gave them actually matches the
		// per-commitment point we generated at that commitment height.
		let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
		let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
			.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
		if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
			return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
		// The peer has proven (via the secret checked above) a commitment state newer than
		// our local view: we have lost state and broadcasting would be unsafe, so panic.
		if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
			macro_rules! log_and_panic {
				($err_msg: expr) => {
					log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
			log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
				This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
				More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
				If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
				ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
				ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
				Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
				See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
	// Before we change the state of the channel, we check if the peer is sending a very old
	// commitment transaction number, if yes we send a warning message.
	let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
	if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
		ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
	// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
	// remaining cases either succeed or ErrorMessage-fail).
	self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
	self.context.sent_message_awaiting_response = None;
	// Re-send our shutdown (if we had previously sent one) along with the response.
	let shutdown_msg = self.get_outbound_shutdown();
	let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
	if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
		// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
		if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
			self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
			if msg.next_remote_commitment_number != 0 {
				return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
			// Short circuit the whole handler as there is nothing we can resend them
			return Ok(ReestablishResponses {
				channel_ready: None,
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
		// We have OurChannelReady set!
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		return Ok(ReestablishResponses {
			channel_ready: Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			raa: None, commitment_update: None,
			order: RAACommitmentOrder::CommitmentFirst,
			shutdown_msg, announcement_sigs,
	// Work out whether the peer is missing our last revoke_and_ack and needs it re-sent.
	let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
		// Remote isn't waiting on any RevokeAndACK from us!
		// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
	} else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
		if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// Defer the RAA until the in-flight monitor update completes.
			self.context.monitor_pending_revoke_and_ack = true;
		Some(self.get_last_revoke_and_ack())
		return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
	// We increment cur_counterparty_commitment_transaction_number only upon receipt of
	// revoke_and_ack, not on sending commitment_signed, so we add one if have
	// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
	// the corresponding revoke_and_ack back yet.
	let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
	if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
		self.mark_awaiting_response();
	let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
	// Re-send channel_ready when neither side has advanced past the first commitment.
	let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
		// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		Some(msgs::ChannelReady {
			channel_id: self.context.channel_id(),
			next_per_commitment_point,
			short_channel_id_alias: Some(self.context.outbound_scid_alias),
	if msg.next_local_commitment_number == next_counterparty_commitment_number {
		// Peer is fully up to date on our commitment_signed; nothing to re-send but the RAA.
		if required_revoke.is_some() {
			log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
		Ok(ReestablishResponses {
			channel_ready, shutdown_msg, announcement_sigs,
			raa: required_revoke,
			commitment_update: None,
			order: self.context.resend_order.clone(),
	} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
		// Peer is missing our last commitment_signed; regenerate and re-send it.
		if required_revoke.is_some() {
			log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
		if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// Defer the commitment update until the in-flight monitor update completes.
			self.context.monitor_pending_commitment_signed = true;
			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				commitment_update: None, raa: None,
				order: self.context.resend_order.clone(),
			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
				order: self.context.resend_order.clone(),
		Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
/// at which point they will be recalculated.
fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
	where F::Target: FeeEstimator
	// The limits are cached on first computation so later calls return the same range.
	if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
	// Propose a range from our current Background feerate to our Normal feerate plus our
	// force_close_avoidance_max_fee_satoshis.
	// If we fail to come to consensus, we'll have to force-close.
	let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
	// Use NonAnchorChannelFee because this should be an estimate for a channel close
	// that we don't expect to need fee bumping
	let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
	// We only cap the maximum feerate when we're the funder (outbound side); otherwise any
	// feerate is acceptable to us up to the balance-based cap below.
	let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
	// The spec requires that (when the channel does not have anchors) we only send absolute
	// channel fees no greater than the absolute channel fee on the current commitment
	// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
	// very good reason to apply such a limit in any case. We don't bother doing so, risking
	// some force-closure by old nodes, but we wanted to close the channel anyway.
	if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
		let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
		proposed_feerate = cmp::max(proposed_feerate, min_feerate);
		proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
	// Note that technically we could end up with a lower minimum fee if one sides' balance is
	// below our dust limit, causing the output to disappear. We don't bother handling this
	// case, however, as this should only happen if a channel is closed before any (material)
	// payments have been made on it. This may cause slight fee overpayment and/or failure to
	// come to consensus with our counterparty on appropriate fees, however it should be a
	// relatively rare case. We can revisit this later, though note that in order to determine
	// if the funders' output is dust we have to know the absolute fee we're going to use.
	let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
	let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
	let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
		// We always add force_close_avoidance_max_fee_satoshis to our normal
		// feerate-calculated fee, but allow the max to be overridden if we're using a
		// target feerate-calculated fee.
		cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
			proposed_max_feerate as u64 * tx_weight / 1000)
		// When not the funder, accept a fee up to everything except our own balance
		// (rounded up to whole sats), i.e. the counterparty's entire balance.
		self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
	self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
	self.context.closing_fee_limits.clone().unwrap()
/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
/// this point if we're the funder we should send the initial closing_signed, and in any case
/// shutdown should complete within a reasonable timeframe.
fn closing_negotiation_ready(&self) -> bool {
	// Thin wrapper; the actual state checks live on the channel context.
	self.context.closing_negotiation_ready()
/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
/// an Err if no progress is being made and the channel should be force-closed instead.
/// Should be called on a one-minute timer.
pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
	if self.closing_negotiation_ready() {
		// If the in-flight flag was already set on a previous tick, negotiation has
		// stalled for a full timer period - give up and close.
		if self.context.closing_signed_in_flight {
			return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
		self.context.closing_signed_in_flight = true;
/// If `closing_signed` negotiation is ready to begin, proposes our initial `closing_signed`
/// (signed at our minimum acceptable fee, advertising our full fee range) when we're the
/// funder, or processes a queued counterparty `closing_signed` when we're not.
///
/// Returns `(None, None, None)` when there is nothing to do yet.
pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
	&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	// If we're waiting on a monitor persistence, that implies we're also waiting to send some
	// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
	// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
	// that closing_negotiation_ready checks this case (as well as a few others).
	if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
		return Ok((None, None, None));
	if !self.context.is_outbound() {
		// As the non-funder we don't open negotiation; process the counterparty's
		// closing_signed if one was queued while we weren't ready.
		if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
			return self.closing_signed(fee_estimator, &msg);
		return Ok((None, None, None));
	// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
	// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
	if self.context.expecting_peer_commitment_signed {
		return Ok((None, None, None));
	let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
	assert!(self.context.shutdown_scriptpubkey.is_some());
	// Open negotiation at our minimum fee; the fee_range below tells the peer how far
	// we're willing to go.
	let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
	log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
		our_min_fee, our_max_fee, total_fee_satoshis);
	match &self.context.holder_signer {
		ChannelSignerType::Ecdsa(ecdsa) => {
			.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
			.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
			// Remember what we sent so closing_signed replies can be validated against it.
			self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
			Ok((Some(msgs::ClosingSigned {
				channel_id: self.context.channel_id,
				fee_satoshis: total_fee_satoshis,
				fee_range: Some(msgs::ClosingSignedFeeRange {
					min_fee_satoshis: our_min_fee,
					max_fee_satoshis: our_max_fee,
// Marks a channel as waiting for a response from the counterparty. If it's not received
// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
// to disconnect them (see `should_disconnect_peer_awaiting_response`).
fn mark_awaiting_response(&mut self) {
	// Zero ticks have elapsed so far; the counter is advanced once per timer tick.
	self.context.sent_message_awaiting_response = Some(0);
/// Determines whether we should disconnect the counterparty due to not receiving a response
/// within our expected timeframe.
///
/// Each call advances the tick counter started by `mark_awaiting_response`; once it reaches
/// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] this returns true.
///
/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
	let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
		// Don't disconnect when we're not waiting on a response.
		*ticks_elapsed += 1;
	*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
/// Handles a counterparty `shutdown` message: validates and records their script, queues a
/// monitor update for our own shutdown script if we hadn't committed to one, sends our own
/// `shutdown` if we haven't already, and drops any holding-cell HTLC additions, returning
/// them so the corresponding payments can be failed back.
	&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	// No channel-updating messages are valid until the peer completes channel_reestablish.
	if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
		return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
	if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
		// Spec says we should fail the connection, not the channel, but that's nonsense, there
		// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
		// can do that via error message without getting a connection fail anyway...
		return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
	// shutdown is invalid while the peer has inbound HTLC additions we haven't committed to.
	for htlc in self.context.pending_inbound_htlcs.iter() {
		if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
			return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
	assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
	// The peer's script must be standard per BOLT 2 (given their negotiated features)...
	if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
		return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
	// ...and must not change across repeated shutdown sends.
	if self.context.counterparty_shutdown_scriptpubkey.is_some() {
		if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
			return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
		self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
	// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
	// immediately after the commitment dance, but we can send a Shutdown because we won't send
	// any further commitment updates after we set LocalShutdownSent.
	let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
	// Choose our own shutdown script now if we had never committed to one before.
	let update_shutdown_script = match self.context.shutdown_scriptpubkey {
		assert!(send_shutdown);
		let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
			Ok(scriptpubkey) => scriptpubkey,
			Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
		if !shutdown_scriptpubkey.is_compatible(their_features) {
			return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
		self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
	// From here on out, we may not fail!
	self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
	self.context.update_time_counter += 1;
	// Persist our newly-chosen shutdown script to the ChannelMonitor, if we just picked one.
	let monitor_update = if update_shutdown_script {
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
				scriptpubkey: self.get_closing_scriptpubkey(),
		self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
		self.push_ret_blockable_mon_update(monitor_update)
	let shutdown = if send_shutdown {
		Some(msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
	// We can't send our shutdown until we've committed all of our pending HTLCs, but the
	// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
	// cell HTLCs and return them to fail the payment.
	self.context.holding_cell_update_fee = None;
	let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
	self.context.holding_cell_htlc_updates.retain(|htlc_update| {
		&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
			dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
	self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
	self.context.update_time_counter += 1;
	Ok((shutdown, monitor_update, dropped_outbound_htlcs))
// Assembles the fully-signed cooperative-close transaction from our signature and
// the counterparty's. The witness must satisfy the 2-of-2 funding redeemscript:
// an empty element first (CHECKMULTISIG pops one extra item), then the two DER
// signatures (each with SIGHASH_ALL appended), then the redeemscript itself.
// NOTE(review): some interior lines (e.g. the `else` arm and final return) appear
// elided in this dump — confirm against the full file.
4549 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4550 let mut tx = closing_tx.trust().built_transaction().clone();
4552 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4554 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4555 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
// Append the SIGHASH_ALL byte to each DER-encoded signature, as consensus requires.
4556 let mut holder_sig = sig.serialize_der().to_vec();
4557 holder_sig.push(EcdsaSighashType::All as u8);
4558 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4559 cp_sig.push(EcdsaSighashType::All as u8);
// Signatures must be pushed in the same order as the corresponding pubkeys appear
// in the funding redeemscript, i.e. lexicographic order of the serialized keys.
4560 if funding_key[..] < counterparty_funding_key[..] {
4561 tx.input[0].witness.push(holder_sig);
4562 tx.input[0].witness.push(cp_sig);
// (else: counterparty key sorts first, so its signature goes first)
4564 tx.input[0].witness.push(cp_sig);
4565 tx.input[0].witness.push(holder_sig);
// Final witness element: the raw funding redeemscript.
4568 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
/// Handles a `closing_signed` message from our peer: validates the proposed fee and
/// signature, then either completes the close (returning the final signed transaction
/// and a [`ShutdownResult`]) or counter-proposes a fee with our own `closing_signed`.
/// Returns `Ok((None, None, None))` when the message must be deferred (e.g. a monitor
/// update is in flight).
4572 pub fn closing_signed<F: Deref>(
4573 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4574 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4575 where F::Target: FeeEstimator
// Preconditions: both sides must have exchanged shutdown, the peer must be
// connected, and no HTLCs may remain pending.
4577 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4578 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4580 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4581 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4583 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4584 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4586 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4587 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
// As the funder (outbound side), we are required to make the first fee proposal.
4590 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4591 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
// Defer handling while a monitor update is pending; stash the message for later.
4594 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4595 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4596 return Ok((None, None, None));
// Rebuild the closing tx at the peer's proposed fee and verify the fee matches
// exactly what that construction would pay.
4599 let funding_redeemscript = self.context.get_funding_redeemscript();
4600 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4601 if used_total_fee != msg.fee_satoshis {
4602 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4604 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4606 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4609 // The remote end may have decided to revoke their output due to inconsistent dust
4610 // limits, so check for that case by re-checking the signature here.
4611 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4612 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4613 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
// Refuse non-segwit outputs below the dust threshold: such a tx may not relay.
4617 for outp in closing_tx.trust().built_transaction().output.iter() {
4618 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4619 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
// If the peer accepted a fee we previously proposed, negotiation is complete:
// build the fully-signed tx and transition to ShutdownComplete.
4623 assert!(self.context.shutdown_scriptpubkey.is_some());
4624 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4625 if last_fee == msg.fee_satoshis {
4626 let shutdown_result = ShutdownResult {
4627 monitor_update: None,
4628 dropped_outbound_htlcs: Vec::new(),
4629 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4631 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4632 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4633 self.context.update_time_counter += 1;
4634 return Ok((None, Some(tx), Some(shutdown_result)));
4638 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
// Signs and returns a closing_signed at `$new_fee`; if that equals the peer's
// proposal we also finish the close and return the signed tx alongside it.
4640 macro_rules! propose_fee {
4641 ($new_fee: expr) => {
4642 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4643 (closing_tx, $new_fee)
4645 self.build_closing_transaction($new_fee, false)
4648 return match &self.context.holder_signer {
4649 ChannelSignerType::Ecdsa(ecdsa) => {
4651 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4652 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4653 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4654 let shutdown_result = ShutdownResult {
4655 monitor_update: None,
4656 dropped_outbound_htlcs: Vec::new(),
4657 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4659 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4660 self.context.update_time_counter += 1;
4661 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4662 (Some(tx), Some(shutdown_result))
4667 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4668 Ok((Some(msgs::ClosingSigned {
4669 channel_id: self.context.channel_id,
4670 fee_satoshis: used_fee,
4672 fee_range: Some(msgs::ClosingSignedFeeRange {
4673 min_fee_satoshis: our_min_fee,
4674 max_fee_satoshis: our_max_fee,
4676 }), signed_tx, shutdown_result))
// Modern negotiation: peer supplied a fee_range, so settle within the overlap of
// their range and ours.
4682 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4683 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4684 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4686 if max_fee_satoshis < our_min_fee {
4687 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4689 if min_fee_satoshis > our_max_fee {
4690 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4693 if !self.context.is_outbound() {
4694 // They have to pay, so pick the highest fee in the overlapping range.
4695 // We should never set an upper bound aside from their full balance
4696 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4697 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4699 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4700 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4701 msg.fee_satoshis, our_min_fee, our_max_fee)));
4703 // The proposed fee is in our acceptable range, accept it and broadcast!
4704 propose_fee!(msg.fee_satoshis);
4707 // Old fee style negotiation. We don't bother to enforce whether they are complying
4708 // with the "making progress" requirements, we just comply and hope for the best.
4709 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4710 if msg.fee_satoshis > last_fee {
4711 if msg.fee_satoshis < our_max_fee {
4712 propose_fee!(msg.fee_satoshis);
4713 } else if last_fee < our_max_fee {
4714 propose_fee!(our_max_fee);
4716 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4719 if msg.fee_satoshis > our_min_fee {
4720 propose_fee!(msg.fee_satoshis);
4721 } else if last_fee > our_min_fee {
4722 propose_fee!(our_min_fee);
4724 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
// No prior proposal from us: clamp their suggestion into our [min, max] range.
4728 if msg.fee_satoshis < our_min_fee {
4729 propose_fee!(our_min_fee);
4730 } else if msg.fee_satoshis > our_max_fee {
4731 propose_fee!(our_max_fee);
4733 propose_fee!(msg.fee_satoshis);
// Checks an HTLC we have been asked to forward against a specific [`ChannelConfig`]:
// the inbound amount must cover our fee (base + proportional) over `amt_to_forward`,
// and the inbound expiry must leave at least `cltv_expiry_delta` blocks of margin.
// On failure, returns the failure-message string and the BOLT-4 error code.
4739 fn internal_htlc_satisfies_config(
4740 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4741 ) -> Result<(), (&'static str, u16)> {
// Checked arithmetic: `fee` is None on overflow, which is treated as a fee failure.
4742 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4743 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4744 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4745 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4747 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4748 0x1000 | 12, // fee_insufficient
// Widen to u64 before adding so the expiry comparison cannot overflow.
4751 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4753 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4754 0x1000 | 13, // incorrect_cltv_expiry
4760 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4761 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4762 /// unsuccessful, falls back to the previous one if one exists.
4763 pub fn htlc_satisfies_config(
4764 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4765 ) -> Result<(), (&'static str, u16)> {
4766 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
// Retry against the previous config so forwards are not rejected during the
// grace period after a config change.
4768 if let Some(prev_config) = self.context.prev_config() {
4769 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
// Next holder commitment number to be signed (counter counts down; +1 exposes the
// "current" number to callers).
4776 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4777 self.context.cur_holder_commitment_transaction_number + 1
// Current counterparty commitment number; subtract one while we are awaiting their
// revoke_and_ack since their state has already advanced.
4780 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4781 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
// Most recent counterparty commitment number that has been revoked (two ahead of
// the stored countdown counter).
4784 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4785 self.context.cur_counterparty_commitment_transaction_number + 2
// Borrow the holder's channel signer.
4789 pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
4790 &self.context.holder_signer
// Snapshot of the channel's value/HTLC accounting, mostly in msat. Note several
// fields are converted from sats via `* 1000`.
4794 pub fn get_value_stat(&self) -> ChannelValueStat {
4796 value_to_self_msat: self.context.value_to_self_msat,
4797 channel_value_msat: self.context.channel_value_satoshis * 1000,
// `unwrap()`: assumes the counterparty's reserve was negotiated before this is
// called — TODO(review) confirm callers only use this post-accept.
4798 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4799 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4800 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum only the AddHTLC entries sitting in the holding cell.
4801 holding_cell_outbound_amount_msat: {
4803 for h in self.context.holding_cell_htlc_updates.iter() {
4805 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4813 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4814 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4818 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4819 /// Allowed in any state (including after shutdown)
4820 pub fn is_awaiting_monitor_update(&self) -> bool {
4821 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4824 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4825 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4826 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
// Otherwise the last unblocked update is the one just before the first blocked one.
4827 self.context.blocked_monitor_updates[0].update.update_id - 1
4830 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4831 /// further blocked monitor update exists after the next.
4832 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4833 if self.context.blocked_monitor_updates.is_empty() { return None; }
// Pop the front of the FIFO; the bool tells the caller whether more remain.
4834 Some((self.context.blocked_monitor_updates.remove(0).update,
4835 !self.context.blocked_monitor_updates.is_empty()))
4838 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4839 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4840 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4841 -> Option<ChannelMonitorUpdate> {
// Only release immediately if nothing is already queued, preserving ordering.
4842 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4843 if !release_monitor {
4844 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
// Number of monitor updates currently held back in the blocked queue.
4853 pub fn blocked_monitor_updates_pending(&self) -> usize {
4854 self.context.blocked_monitor_updates.len()
4857 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4858 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4859 /// transaction. If the channel is inbound, this implies simply that the channel has not
4861 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4862 if !self.is_awaiting_monitor_update() { return false; }
// Normal (non-0conf) case: after masking off incidental flags, only FundingSent
// remains set, i.e. we are waiting on the very first monitor persistence.
4863 if self.context.channel_state &
4864 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4865 == ChannelState::FundingSent as u32 {
4866 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4867 // FundingSent set, though our peer could have sent their channel_ready.
4868 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0)
4871 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4872 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4873 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4874 // waiting for the initial monitor persistence. Thus, we check if our commitment
4875 // transaction numbers have both been iterated only exactly once (for the
4876 // funding_signed), and we're awaiting monitor update.
4878 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4879 // only way to get an awaiting-monitor-update state during initial funding is if the
4880 // initial monitor persistence is still pending).
4882 // Because deciding we're awaiting initial broadcast spuriously could result in
4883 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4884 // we hard-assert here, even in production builds.
4885 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4886 assert!(self.context.monitor_pending_channel_ready);
4887 assert_eq!(self.context.latest_monitor_update_id, 0);
4893 /// Returns true if our channel_ready has been sent
4894 pub fn is_our_channel_ready(&self) -> bool {
// True once OurChannelReady is flagged, or the state machine has advanced past it.
4895 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4898 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4899 pub fn received_shutdown(&self) -> bool {
4900 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4903 /// Returns true if we either initiated or agreed to shut down the channel.
4904 pub fn sent_shutdown(&self) -> bool {
4905 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4908 /// Returns true if this channel is fully shut down. True here implies that no further actions
4909 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4910 /// will be handled appropriately by the chain monitor.
4911 pub fn is_shutdown(&self) -> bool {
4912 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
// Invariant: ShutdownComplete is a terminal state with no other flags set.
4913 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
// Current gossip `channel_update` enabled/disabled status.
4918 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4919 self.context.channel_update_status
// Set the gossip `channel_update` status, bumping the update-time counter so a
// regenerated channel_update carries a fresh timestamp.
4922 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4923 self.context.update_time_counter += 1;
4924 self.context.channel_update_status = status;
// Checks whether the funding transaction is sufficiently confirmed for us to send
// `channel_ready`, updating channel state flags accordingly. Returns the
// `channel_ready` message to send, or None if not (yet) appropriate.
4927 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
4929 // * always when a new block/transactions are confirmed with the new height
4930 // * when funding is signed with a height of 0
// Not confirmed and not a 0-conf channel: nothing to do.
4931 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
4935 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
// A reorg took us below the confirmation height: forget the confirmation.
4936 if funding_tx_confirmations <= 0 {
4937 self.context.funding_tx_confirmation_height = 0;
4940 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
4944 // If we're still pending the signature on a funding transaction, then we're not ready to send a
4945 // channel_ready yet.
4946 if self.context.signer_pending_funding {
4950 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
4951 // channel_ready until the entire batch is ready.
4952 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// Advance the state machine: FundingSent -> +OurChannelReady, or (with
// TheirChannelReady already set) straight to ChannelReady.
4953 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
4954 self.context.channel_state |= ChannelState::OurChannelReady as u32;
4956 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
4957 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
4958 self.context.update_time_counter += 1;
4960 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
4961 // We got a reorg but not enough to trigger a force close, just ignore.
4964 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
4965 // We should never see a funding transaction on-chain until we've received
4966 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
4967 // an inbound channel - before that we have no known funding TXID). The fuzzer,
4968 // however, may do this and we shouldn't treat it as a bug.
4969 #[cfg(not(fuzzing))]
4970 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
4971 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
4972 self.context.channel_state);
4974 // We got a reorg but not enough to trigger a force close, just ignore.
4978 if need_commitment_update {
// Only actually build the message if no monitor update is pending and the peer is
// connected; otherwise record that channel_ready is owed via the monitor-pending flag.
4979 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
4980 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4981 let next_per_commitment_point =
4982 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
4983 return Some(msgs::ChannelReady {
4984 channel_id: self.context.channel_id,
4985 next_per_commitment_point,
4986 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4990 self.context.monitor_pending_channel_ready = true;
4996 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
4997 /// In the first case, we store the confirmation height and calculating the short channel id.
4998 /// In the second, we simply return an Err indicating we need to be force-closed now.
4999 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5000 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5001 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5002 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5004 NS::Target: NodeSigner,
5007 let mut msgs = (None, None);
5008 if let Some(funding_txo) = self.context.get_funding_txo() {
5009 for &(index_in_block, tx) in txdata.iter() {
5010 // Check if the transaction is the expected funding transaction, and if it is,
5011 // check that it pays the right amount to the right script.
5012 if self.context.funding_tx_confirmation_height == 0 {
5013 if tx.txid() == funding_txo.txid {
5014 let txo_idx = funding_txo.index as usize;
// Validate the funding output: correct index, correct P2WSH script, correct value.
5015 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5016 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5017 if self.context.is_outbound() {
5018 // If we generated the funding transaction and it doesn't match what it
5019 // should, the client is really broken and we should just panic and
5020 // tell them off. That said, because hash collisions happen with high
5021 // probability in fuzzing mode, if we're fuzzing we just close the
5022 // channel and move on.
5023 #[cfg(not(fuzzing))]
5024 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5026 self.context.update_time_counter += 1;
5027 let err_reason = "funding tx had wrong script/value or output index";
5028 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
// For outbound channels, a malleable (non-segwit-signed) funding tx would let the
// counterparty invalidate our commitment txid — treat an empty witness as fatal.
5030 if self.context.is_outbound() {
5031 if !tx.is_coin_base() {
5032 for input in tx.input.iter() {
5033 if input.witness.is_empty() {
5034 // We generated a malleable funding transaction, implying we've
5035 // just exposed ourselves to funds loss to our counterparty.
5036 #[cfg(not(fuzzing))]
5037 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
// Record confirmation and derive the short channel id from (height, index, vout).
5042 self.context.funding_tx_confirmation_height = height;
5043 self.context.funding_tx_confirmed_in = Some(*block_hash);
5044 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5045 Ok(scid) => Some(scid),
5046 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5049 // If this is a coinbase transaction and not a 0-conf channel
5050 // we should update our min_depth to 100 to handle coinbase maturity
5051 if tx.is_coin_base() &&
5052 self.context.minimum_depth.unwrap_or(0) > 0 &&
5053 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5054 self.context.minimum_depth = Some(COINBASE_MATURITY);
5057 // If we allow 1-conf funding, we may need to check for channel_ready here and
5058 // send it immediately instead of waiting for a best_block_updated call (which
5059 // may have already happened for this block).
5060 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5061 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5062 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5063 msgs = (Some(channel_ready), announcement_sigs);
// Any confirmed tx spending the funding outpoint closes the channel.
5066 for inp in tx.input.iter() {
5067 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5068 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5069 return Err(ClosureReason::CommitmentTxConfirmed);
5077 /// When a new block is connected, we check the height of the block against outbound holding
5078 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5079 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5080 /// handled by the ChannelMonitor.
5082 /// If we return Err, the channel may have been closed, at which point the standard
5083 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5086 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5088 pub fn best_block_updated<NS: Deref, L: Deref>(
5089 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5090 node_signer: &NS, user_config: &UserConfig, logger: &L
5091 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5093 NS::Target: NodeSigner,
// Thin wrapper: delegate to the internal impl with signing context supplied.
5096 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
// Core block-height update logic shared by `best_block_updated` and
// `funding_transaction_unconfirmed` (the latter passes `chain_node_signer = None`,
// so no channel_ready/announcement messages are generated on that path).
5099 fn do_best_block_updated<NS: Deref, L: Deref>(
5100 &mut self, height: u32, highest_header_time: u32,
5101 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5102 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5104 NS::Target: NodeSigner,
5107 let mut timed_out_htlcs = Vec::new();
5108 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5109 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5111 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Time out holding-cell HTLC adds whose expiry is too close to forward safely.
5112 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5114 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5115 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5116 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Only move update_time forward, never backward.
5124 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5126 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5127 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5128 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5130 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5131 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5134 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5135 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5136 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5137 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5138 if self.context.funding_tx_confirmation_height == 0 {
5139 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5140 // zero if it has been reorged out, however in either case, our state flags
5141 // indicate we've already sent a channel_ready
5142 funding_tx_confirmations = 0;
5145 // If we've sent channel_ready (or have both sent and received channel_ready), and
5146 // the funding transaction has become unconfirmed,
5147 // close the channel and hope we can get the latest state on chain (because presumably
5148 // the funding transaction is at least still in the mempool of most nodes).
5150 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5151 // 0-conf channel, but not doing so may lead to the
5152 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5154 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5155 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5156 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5157 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound channels whose funding never confirmed within the deadline get closed
// so we don't track them forever.
5159 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5160 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5161 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5162 // If funding_tx_confirmed_in is unset, the channel must not be active
5163 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5164 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5165 return Err(ClosureReason::FundingTimedOut);
5168 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5169 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5171 Ok((None, timed_out_htlcs, announcement_sigs))
5174 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5175 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5176 /// before the channel has reached channel_ready and we can just wait for more blocks.
5177 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5178 if self.context.funding_tx_confirmation_height != 0 {
5179 // We handle the funding disconnection by calling best_block_updated with a height one
5180 // below where our funding was connected, implying a reorg back to conf_height - 1.
5181 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5182 // We use the time field to bump the current time we set on channel updates if its
5183 // larger. If we don't know that time has moved forward, we can just set it to the last
5184 // time we saw and it will be ignored.
5185 let best_time = self.context.update_time_counter;
// No signer context supplied: this path must never produce outbound messages.
5186 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5187 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5188 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5189 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5190 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5196 // We never learned about the funding confirmation anyway, just ignore
5201 // Methods to get unprompted messages to send to the remote end (or where we already returned
5202 // something in the handler for the message that prompted this message):
5204 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5205 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5206 /// directions). Should be used for both broadcasted announcements and in response to an
5207 /// AnnouncementSignatures message from the remote peer.
5209 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5212 /// This will only return ChannelError::Ignore upon failure.
5214 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5215 fn get_channel_announcement<NS: Deref>(
5216 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5217 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5218 if !self.context.config.announced_channel {
5219 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5221 if !self.context.is_usable() {
5222 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5225 let short_channel_id = self.context.get_short_channel_id()
5226 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5227 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5228 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5229 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// node_id_1/node_id_2 (and their funding keys) must be ordered by serialized node
// id, with the lexicographically-lesser node first.
5230 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5232 let msg = msgs::UnsignedChannelAnnouncement {
5233 features: channelmanager::provided_channel_features(&user_config),
5236 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5237 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5238 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5239 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5240 excess_data: Vec::new(),
/// Builds an `announcement_signatures` message for this channel if it is currently ready to
/// be publicly announced. Returns `None` (after logging the reason) when the channel cannot
/// be announced yet or the signer refuses to sign.
5246 fn get_announcement_sigs<NS: Deref, L: Deref>(
5247 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5248 best_block_height: u32, logger: &L
5249 ) -> Option<msgs::AnnouncementSignatures>
5251 NS::Target: NodeSigner,
// Wait for six confirmations (i.e. conf_height + 5 <= best height) before announcing; a
// channel with no recorded confirmation height (0) can never be announced.
5254 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5258 if !self.context.is_usable() {
5262 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5263 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Only ever generate announcement_signatures once per channel.
5267 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5271 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5272 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5275 log_trace!(logger, "{:?}", e);
5279 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5281 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5286 match &self.context.holder_signer {
5287 ChannelSignerType::Ecdsa(ecdsa) => {
5288 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5290 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5295 let short_channel_id = match self.context.get_short_channel_id() {
5297 None => return None,
// Record that we've sent our signatures so we don't re-send on every new block.
5300 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5302 Some(msgs::AnnouncementSignatures {
5303 channel_id: self.context.channel_id(),
5305 node_signature: our_node_sig,
5306 bitcoin_signature: our_bitcoin_sig,
5312 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5314 fn sign_channel_announcement<NS: Deref>(
5315 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5316 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
// We can only produce a fully-signed channel_announcement once we hold the counterparty's
// signatures (stored when we processed their announcement_signatures message).
5317 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5318 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5319 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5320 let were_node_one = announcement.node_id_1 == our_node_key;
5322 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5323 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5324 match &self.context.holder_signer {
5325 ChannelSignerType::Ecdsa(ecdsa) => {
5326 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5327 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
// Signature slots must match the node_id_1/node_id_2 ordering of the unsigned
// announcement: slot 1 carries node 1's signatures, slot 2 carries node 2's.
5328 Ok(msgs::ChannelAnnouncement {
5329 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5330 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5331 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5332 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5333 contents: announcement,
5338 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5342 /// Processes an incoming announcement_signatures message, providing a fully-signed
5343 /// channel_announcement message which we can broadcast and storing our counterparty's
5344 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5345 pub fn announcement_signatures<NS: Deref>(
5346 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5347 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5348 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5349 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// The gossip signature hash is the double-SHA256 of the serialized unsigned announcement.
5351 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify the counterparty's node_signature against their node pubkey...
5353 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5354 return Err(ChannelError::Close(format!(
5355 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5356 &announcement, self.context.get_counterparty_node_id())));
// ...and their bitcoin_signature against their funding pubkey. Either failure is a
// protocol violation and closes the channel.
5358 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5359 return Err(ChannelError::Close(format!(
5360 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5361 &announcement, self.context.counterparty_funding_pubkey())));
// Store the now-verified signatures even if we can't announce yet, so we can build the
// announcement later without the peer having to re-send them.
5364 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5365 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5366 return Err(ChannelError::Ignore(
5367 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5370 self.sign_channel_announcement(node_signer, announcement)
5373 /// Gets a signed channel_announcement for this channel, if we previously received an
5374 /// announcement_signatures from our counterparty.
5375 pub fn get_signed_channel_announcement<NS: Deref>(
5376 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5377 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Don't announce until the funding transaction has at least six confirmations
// (conf_height + 5 <= best height).
5378 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5381 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
// Any failure here (e.g. unannounced channel, missing counterparty sigs) just means
// "no announcement available" — not an error for the caller.
5383 Err(_) => return None,
5385 match self.sign_channel_announcement(node_signer, announcement) {
5386 Ok(res) => Some(res),
5391 /// May panic if called on a channel that wasn't immediately-previously
5392 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5393 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5394 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5395 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5396 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5397 // current to_remote balances. However, it no longer has any use, and thus is now simply
5398 // set to a dummy (but valid, as required by the spec) public key.
5399 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5400 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5401 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5402 let mut pk = [2; 33]; pk[1] = 0xff;
5403 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, include the last revocation
// secret they gave us (the data_loss_protect fields); otherwise there is nothing to send.
5404 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5405 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5406 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5409 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
// Record that we now expect a response to this reestablish from our peer.
5412 self.mark_awaiting_response();
5413 msgs::ChannelReestablish {
5414 channel_id: self.context.channel_id(),
5415 // The protocol has two different commitment number concepts - the "commitment
5416 // transaction number", which starts from 0 and counts up, and the "revocation key
5417 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5418 // commitment transaction numbers by the index which will be used to reveal the
5419 // revocation key for that commitment transaction, which means we have to convert them
5420 // to protocol-level commitment numbers here...
5422 // next_local_commitment_number is the next commitment_signed number we expect to
5423 // receive (indicating if they need to resend one that we missed).
5424 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5425 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5426 // receive, however we track it by the next commitment number for a remote transaction
5427 // (which is one further, as they always revoke previous commitment transaction, not
5428 // the one we send) so we have to decrement by 1. Note that if
5429 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5430 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5432 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5433 your_last_per_commitment_secret: remote_last_secret,
5434 my_current_per_commitment_point: dummy_pubkey,
5435 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5436 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5437 // txid of that interactive transaction, else we MUST NOT set it.
5438 next_funding_txid: None,
5443 // Send stuff to our remote peers:
5445 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5446 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5447 /// commitment update.
5449 /// `Err`s will only be [`ChannelError::Ignore`].
5450 pub fn queue_add_htlc<F: Deref, L: Deref>(
5451 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5452 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5453 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5454 ) -> Result<(), ChannelError>
5455 where F::Target: FeeEstimator, L::Target: Logger
// Pass force_holding_cell == true so send_htlc queues the HTLC rather than returning an
// immediate update_add_htlc message (hence the assert that no message came back).
5458 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5459 skimmed_fee_msat, fee_estimator, logger)
5460 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// send_htlc only ever fails with Ignore errors; anything else is a bug.
5462 if let ChannelError::Ignore(_) = err { /* fine */ }
5463 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5468 /// Adds a pending outbound HTLC to this channel, note that you probably want
5469 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5471 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5473 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5474 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5476 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5477 /// we may not yet have sent the previous commitment update messages and will need to
5478 /// regenerate them.
5480 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5481 /// on this [`Channel`] if `force_holding_cell` is false.
5483 /// `Err`s will only be [`ChannelError::Ignore`].
5484 fn send_htlc<F: Deref, L: Deref>(
5485 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5486 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5487 skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5488 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5489 where F::Target: FeeEstimator, L::Target: Logger
// The channel must be fully open (ChannelReady) and neither side may have begun shutdown.
5491 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5492 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
// Sanity-bound the amount: non-zero and no larger than the whole channel value.
5494 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5495 if amount_msat > channel_total_msat {
5496 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5499 if amount_msat == 0 {
5500 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Enforce the fee-estimator-aware min/max bounds on the next outbound HTLC.
5503 let available_balances = self.context.get_available_balances(fee_estimator);
5504 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5505 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5506 available_balances.next_outbound_htlc_minimum_msat)));
5509 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5510 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5511 available_balances.next_outbound_htlc_limit_msat)));
5514 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5515 // Note that this should never really happen, if we're !is_live() on receipt of an
5516 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5517 // the user to send directly into a !is_live() channel. However, if we
5518 // disconnected during the time the previous hop was doing the commitment dance we may
5519 // end up getting here after the forwarding delay. In any case, returning an
5520 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5521 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// If we're awaiting their revoke_and_ack or a monitor update, the HTLC must wait in the
// holding cell rather than going out immediately.
5524 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5525 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5526 payment_hash, amount_msat,
5527 if force_holding_cell { "into holding cell" }
5528 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5529 else { "to peer" });
5531 if need_holding_cell {
5532 force_holding_cell = true;
5535 // Now update local state:
5536 if force_holding_cell {
5537 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5542 onion_routing_packet,
// Otherwise the HTLC becomes pending immediately, in LocalAnnounced state until the
// counterparty irrevocably commits to it.
5548 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5549 htlc_id: self.context.next_holder_htlc_id,
5551 payment_hash: payment_hash.clone(),
5553 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5558 let res = msgs::UpdateAddHTLC {
5559 channel_id: self.context.channel_id,
5560 htlc_id: self.context.next_holder_htlc_id,
5564 onion_routing_packet,
5567 self.context.next_holder_htlc_id += 1;
/// Builds the next counterparty commitment transaction, promotes pending HTLC/fee state that
/// was waiting on this commitment, and returns the [`ChannelMonitorUpdate`] recording the new
/// counterparty commitment info. Also flips the channel into AwaitingRemoteRevoke.
5572 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5573 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5574 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5575 // fail to generate this, we still are at least at a position where upgrading their status
// Inbound HTLCs waiting on our commitment move to awaiting the (now-announced) revoke.
5577 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5578 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5579 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5581 if let Some(state) = new_state {
5582 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5586 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5587 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5588 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5589 // Grab the preimage, if it exists, instead of cloning
5590 let mut reason = OutboundHTLCOutcome::Success(None);
5591 mem::swap(outcome, &mut reason);
5592 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// An inbound fee update waiting on this commitment becomes the committed feerate.
5595 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5596 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5597 debug_assert!(!self.context.is_outbound());
5598 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5599 self.context.feerate_per_kw = feerate;
5600 self.context.pending_update_fee = None;
5603 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5605 let (mut htlcs_ref, counterparty_commitment_tx) =
5606 self.build_commitment_no_state_update(logger);
5607 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
// Box the HTLC sources so the monitor update owns its data.
5608 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5609 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5611 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5612 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
// Record the new counterparty commitment transaction in the channel monitor.
5615 self.context.latest_monitor_update_id += 1;
5616 let monitor_update = ChannelMonitorUpdate {
5617 update_id: self.context.latest_monitor_update_id,
5618 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5619 commitment_txid: counterparty_commitment_txid,
5620 htlc_outputs: htlcs.clone(),
5621 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5622 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5623 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5624 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5625 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5628 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
/// Builds the counterparty's next commitment transaction (and its included HTLCs) without
/// mutating any channel state. Used both directly and via
/// [`Self::build_commitment_no_status_check`].
5632 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5633 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5634 where L::Target: Logger
5636 let counterparty_keys = self.context.build_remote_transaction_keys();
5637 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5638 let counterparty_commitment_tx = commitment_stats.tx;
5640 #[cfg(any(test, fuzzing))]
5642 if !self.context.is_outbound() {
// In test/fuzzing builds, cross-check the previously-projected commitment fee against the
// fee of the transaction we actually built (only when the projection's inputs still match
// current channel state).
5643 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5644 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5645 if let Some(info) = projected_commit_tx_info {
5646 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5647 if info.total_pending_htlcs == total_pending_htlcs
5648 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5649 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5650 && info.feerate == self.context.feerate_per_kw {
5651 let actual_fee = commit_tx_fee_sat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type()) * 1000;
5652 assert_eq!(actual_fee, info.fee);
5658 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5661 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5662 /// generation when we shouldn't change HTLC/channel state.
5663 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5664 // Get the fee tests from `build_commitment_no_state_update`
5665 #[cfg(any(test, fuzzing))]
5666 self.build_commitment_no_state_update(logger);
// Rebuild the counterparty commitment transaction to sign it.
5668 let counterparty_keys = self.context.build_remote_transaction_keys();
5669 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5670 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5672 match &self.context.holder_signer {
5673 ChannelSignerType::Ecdsa(ecdsa) => {
5674 let (signature, htlc_signatures);
5677 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5678 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Ask the signer for our signature on the counterparty's commitment transaction
// and on each HTLC transaction spending it.
5682 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5683 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5685 htlc_signatures = res.1;
5687 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5688 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5689 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5690 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// HTLC signatures come back in the same order as the HTLCs we passed in; zip to log
// each pair together.
5692 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5693 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5694 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5695 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5696 log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
5697 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5701 Ok((msgs::CommitmentSigned {
5702 channel_id: self.context.channel_id,
5706 partial_signature_with_nonce: None,
5707 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5712 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5713 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5715 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5716 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5717 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5718 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5719 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5720 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5721 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5722 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell == false: we want the HTLC to go out with this commitment if possible.
5724 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5725 onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger)
5726 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// Build the new counterparty commitment and queue the monitor update; the caller gets the
// blockable update handle (if any) to persist before releasing messages.
5729 let monitor_update = self.build_commitment_no_status_check(logger);
5730 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5731 Ok(self.push_ret_blockable_mon_update(monitor_update))
5737 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5739 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
// Capture the counterparty's advertised forwarding parameters from the update message.
5740 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5741 fee_base_msat: msg.contents.fee_base_msat,
5742 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5743 cltv_expiry_delta: msg.contents.cltv_expiry_delta
// Compare before overwriting so we can report whether anything actually changed.
5745 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5747 self.context.counterparty_forwarding_info = new_forwarding_info;
5753 /// Begins the shutdown process, getting a message for the remote peer and returning all
5754 /// holding cell HTLCs for payment failure.
5756 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5757 /// [`ChannelMonitorUpdate`] will be returned).
5758 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5759 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5760 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
// Shutdown cannot begin while we have HTLCs announced to our peer but not yet resolved by
// the commitment dance (LocalAnnounced state).
5762 for htlc in self.context.pending_outbound_htlcs.iter() {
5763 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5764 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
// Reject duplicate shutdown attempts, distinguishing who initiated.
5767 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5768 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5769 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5771 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5772 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5775 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5776 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5778 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5779 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5780 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5783 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5784 // script is set, we just force-close and call it a day.
5785 let mut chan_closed = false;
5786 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5790 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5792 None if !chan_closed => {
5793 // use override shutdown script if provided
5794 let shutdown_scriptpubkey = match override_shutdown_script {
5795 Some(script) => script,
5797 // otherwise, use the shutdown scriptpubkey provided by the signer
5798 match signer_provider.get_shutdown_scriptpubkey() {
5799 Ok(scriptpubkey) => scriptpubkey,
5800 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
// The chosen script must be of a form the counterparty advertised support for.
5804 if !shutdown_scriptpubkey.is_compatible(their_features) {
5805 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5807 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5813 // From here on out, we may not fail!
5814 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
// Unfunded channel: jump straight to ShutdownComplete with an (empty) ShutdownResult.
5815 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5816 let shutdown_result = ShutdownResult {
5817 monitor_update: None,
5818 dropped_outbound_htlcs: Vec::new(),
5819 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5821 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5822 Some(shutdown_result)
5824 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5827 self.context.update_time_counter += 1;
// If we just set a fresh shutdown script, persist it to the channel monitor before we
// send the shutdown message.
5829 let monitor_update = if update_shutdown_script {
5830 self.context.latest_monitor_update_id += 1;
5831 let monitor_update = ChannelMonitorUpdate {
5832 update_id: self.context.latest_monitor_update_id,
5833 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5834 scriptpubkey: self.get_closing_scriptpubkey(),
5837 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5838 self.push_ret_blockable_mon_update(monitor_update)
5840 let shutdown = msgs::Shutdown {
5841 channel_id: self.context.channel_id,
5842 scriptpubkey: self.get_closing_scriptpubkey(),
5845 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5846 // our shutdown until we've committed all of the pending changes.
5847 self.context.holding_cell_update_fee = None;
5848 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5849 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5851 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5852 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5859 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5860 "we can't both complete shutdown and return a monitor update");
5862 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
/// Returns an iterator over the sources and payment hashes of all outbound HTLCs currently
/// in flight on this channel — both those still queued in the holding cell and those already
/// pending on the commitment transaction.
5865 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5866 self.context.holding_cell_htlc_updates.iter()
5867 .flat_map(|htlc_update| {
// Only AddHTLC holding-cell entries represent in-flight outbound HTLCs.
5869 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5870 => Some((source, payment_hash)),
5874 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5878 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5879 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// The common channel state/context shared across channel phases.
5880 pub context: ChannelContext<SP>,
// Context tracked only while the channel remains unfunded.
5881 pub unfunded_context: UnfundedChannelContext,
5884 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
// Builds a new, not-yet-funded outbound channel in the `OurInitSent` state.
//
// Validates the requested parameters (funding value, push amount, contest
// delay, reserve) against protocol-level limits and the user's `UserConfig`,
// derives the channel keys from the `SignerProvider`, and assembles the
// initial `ChannelContext`.
//
// Errors:
// - `APIError::APIMisuseError` for invalid/unaffordable parameters,
// - `APIError::ChannelUnavailable` when the signer cannot provide scripts,
// - `APIError::IncompatibleShutdownScript` when our upfront shutdown script
//   is not supported by the counterparty's advertised features.
5885 pub fn new<ES: Deref, F: Deref>(
5886 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5887 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5888 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
5889 ) -> Result<OutboundV1Channel<SP>, APIError>
5890 where ES::Target: EntropySource,
5891 F::Target: FeeEstimator
5893 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
// `false` here marks the keys as belonging to an outbound channel.
5894 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5895 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5896 let pubkeys = holder_signer.pubkeys().clone();
// Funding above MAX_FUNDING_SATOSHIS_NO_WUMBO requires the counterparty to
// advertise wumbo support.
5898 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5899 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5901 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5902 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5904 let channel_value_msat = channel_value_satoshis * 1000;
5905 if push_msat > channel_value_msat {
5906 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
// A contest delay below BREAKDOWN_TIMEOUT would not leave us enough time to
// claim funds after a counterparty breach.
5908 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5909 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5911 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5912 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5913 // Protocol level safety check in place, although it should never happen because
5914 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5915 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5918 let channel_type = Self::get_initial_channel_type(&config, their_features);
5919 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
// Anchor channels use a distinct fee target and must also budget for the two
// anchor outputs on the commitment transaction.
5921 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5922 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5924 (ConfirmationTarget::NonAnchorChannelFee, 0)
5926 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
// Our initial balance must at least cover the commitment-tx fee (computed for
// MIN_AFFORDABLE_HTLC_COUNT HTLCs) plus any anchor output value.
5928 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5929 let commitment_tx_fee = commit_tx_fee_sat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) * 1000;
5930 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5931 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
// Randomize the secp context with fresh entropy (side-channel hardening).
5934 let mut secp_ctx = Secp256k1::new();
5935 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
// Only fetch an upfront shutdown script if the user opted in to committing one.
5937 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5938 match signer_provider.get_shutdown_scriptpubkey() {
5939 Ok(scriptpubkey) => Some(scriptpubkey),
5940 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5944 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5945 if !shutdown_scriptpubkey.is_compatible(&their_features) {
5946 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5950 let destination_script = match signer_provider.get_destination_script() {
5951 Ok(script) => script,
5952 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
// Use the caller-supplied temporary id if given, else derive a random one.
5955 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
// Assemble the initial channel state. Counterparty-provided fields (reserve,
// dust limit, minimum_depth, pubkeys, ...) start zero/None and are filled in
// when we process their `accept_channel`.
5958 context: ChannelContext {
5961 config: LegacyChannelConfig {
5962 options: config.channel_config.clone(),
5963 announced_channel: config.channel_handshake_config.announced_channel,
5964 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5969 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
// Until funding, the channel is identified by its temporary id.
5971 channel_id: temporary_channel_id,
5972 temporary_channel_id: Some(temporary_channel_id),
5973 channel_state: ChannelState::OurInitSent as u32,
5974 announcement_sigs_state: AnnouncementSigsState::NotSent,
5976 channel_value_satoshis,
5978 latest_monitor_update_id: 0,
5980 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5981 shutdown_scriptpubkey,
// Both sides start from the initial commitment number; they count down.
5984 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5985 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5988 pending_inbound_htlcs: Vec::new(),
5989 pending_outbound_htlcs: Vec::new(),
5990 holding_cell_htlc_updates: Vec::new(),
5991 pending_update_fee: None,
5992 holding_cell_update_fee: None,
5993 next_holder_htlc_id: 0,
5994 next_counterparty_htlc_id: 0,
5995 update_time_counter: 1,
5997 resend_order: RAACommitmentOrder::CommitmentFirst,
5999 monitor_pending_channel_ready: false,
6000 monitor_pending_revoke_and_ack: false,
6001 monitor_pending_commitment_signed: false,
6002 monitor_pending_forwards: Vec::new(),
6003 monitor_pending_failures: Vec::new(),
6004 monitor_pending_finalized_fulfills: Vec::new(),
6006 signer_pending_commitment_update: false,
6007 signer_pending_funding: false,
// Debug-only tracking of the largest balances seen on each side's
// commitment transaction; both start at (our balance, pushed amount).
6009 #[cfg(debug_assertions)]
6010 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6011 #[cfg(debug_assertions)]
6012 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6014 last_sent_closing_fee: None,
6015 pending_counterparty_closing_signed: None,
6016 expecting_peer_commitment_signed: false,
6017 closing_fee_limits: None,
6018 target_closing_feerate_sats_per_kw: None,
6020 funding_tx_confirmed_in: None,
6021 funding_tx_confirmation_height: 0,
6022 short_channel_id: None,
6023 channel_creation_height: current_chain_height,
6025 feerate_per_kw: commitment_feerate,
6026 counterparty_dust_limit_satoshis: 0,
6027 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6028 counterparty_max_htlc_value_in_flight_msat: 0,
6029 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6030 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6031 holder_selected_channel_reserve_satoshis,
6032 counterparty_htlc_minimum_msat: 0,
// A 0 htlc_minimum would accept 0-value HTLCs, so clamp it up to 1 msat.
6033 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6034 counterparty_max_accepted_htlcs: 0,
6035 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6036 minimum_depth: None, // Filled in in accept_channel
6038 counterparty_forwarding_info: None,
6040 channel_transaction_parameters: ChannelTransactionParameters {
6041 holder_pubkeys: pubkeys,
6042 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6043 is_outbound_from_holder: true,
6044 counterparty_parameters: None,
6045 funding_outpoint: None,
6046 channel_type_features: channel_type.clone()
6048 funding_transaction: None,
6049 is_batch_funding: None,
6051 counterparty_cur_commitment_point: None,
6052 counterparty_prev_commitment_point: None,
6053 counterparty_node_id,
6055 counterparty_shutdown_scriptpubkey: None,
6057 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6059 channel_update_status: ChannelUpdateStatus::Enabled,
6060 closing_signed_in_flight: false,
6062 announcement_sigs: None,
6064 #[cfg(any(test, fuzzing))]
6065 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6066 #[cfg(any(test, fuzzing))]
6067 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6069 workaround_lnd_bug_4006: None,
6070 sent_message_awaiting_response: None,
6072 latest_inbound_scid_alias: None,
6073 outbound_scid_alias,
6075 channel_pending_event_emitted: false,
6076 channel_ready_event_emitted: false,
6078 #[cfg(any(test, fuzzing))]
6079 historical_inbound_htlc_fulfills: HashSet::new(),
6084 blocked_monitor_updates: Vec::new(),
// Fresh unfunded-channel tracking; the age counter starts at zero.
6086 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6090 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6091 /// a funding_created message for the remote peer.
6092 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6093 /// or if called on an inbound channel.
6094 /// Note that channel_id changes during this call!
6095 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6096 /// If an Err is returned, it is a ChannelError::Close.
6097 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6098 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6099 if !self.context.is_outbound() {
6100 panic!("Tried to create outbound funding_created message on an inbound channel!");
// Must be in exactly the post-handshake state: both init messages exchanged,
// funding not yet created.
6102 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6103 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
// Neither side may have revealed secrets or advanced commitment numbers yet.
6105 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6106 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6107 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6108 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6111 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6112 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6114 // Now that we're past error-generating stuff, update our local state:
6116 self.context.channel_state = ChannelState::FundingCreated as u32;
// The channel id switches from the temporary one to an id derived from the
// funding outpoint.
6117 self.context.channel_id = funding_txo.to_channel_id();
6119 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6120 // We can skip this if it is a zero-conf channel.
6121 if funding_transaction.is_coin_base() &&
6122 self.context.minimum_depth.unwrap_or(0) > 0 &&
6123 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6124 self.context.minimum_depth = Some(COINBASE_MATURITY);
6127 self.context.funding_transaction = Some(funding_transaction);
6128 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6130 let funding_created = self.context.get_funding_created_msg(logger);
// With an async signer we may not get a message yet; remember that we still
// owe a funding_created once the signature becomes available.
6131 if funding_created.is_none() {
6132 if !self.context.signer_pending_funding {
6133 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6134 self.context.signer_pending_funding = true;
// Consume self: the channel is now a (not-yet-fully-funded) `Channel`.
6138 let channel = Channel {
6139 context: self.context,
6142 Ok((channel, funding_created))
// Selects the channel type we will propose in `open_channel`, based on our
// config preferences and the counterparty's advertised features. Starts from
// `only_static_remote_key` and layers on optional features we both support.
6145 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6146 // The default channel type (ie the first one we try) depends on whether the channel is
6147 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6148 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6149 // with no other changes, and fall back to `only_static_remotekey`.
6150 let mut ret = ChannelTypeFeatures::only_static_remote_key();
// scid_privacy only applies to unannounced channels and only when both our
// config asks for it and the peer supports it.
6151 if !config.channel_handshake_config.announced_channel &&
6152 config.channel_handshake_config.negotiate_scid_privacy &&
6153 their_features.supports_scid_privacy() {
6154 ret.set_scid_privacy_required();
6157 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6158 // set it now. If they don't understand it, we'll fall back to our default of
6159 // `only_static_remotekey`.
6160 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6161 their_features.supports_anchors_zero_fee_htlc_tx() {
6162 ret.set_anchors_zero_fee_htlc_tx_required();
6168 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6169 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6170 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6171 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6172 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6173 ) -> Result<msgs::OpenChannel, ()>
6175 F::Target: FeeEstimator
// Retrying only makes sense for an outbound channel still awaiting accept_channel.
6177 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6178 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6179 // We've exhausted our options
6182 // We support opening a few different types of channels. Try removing our additional
6183 // features one by one until we've either arrived at our default or the counterparty has
6186 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6187 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6188 // checks whether the counterparty supports every feature, this would only happen if the
6189 // counterparty is advertising the feature, but rejecting channels proposing the feature for
// Downgrade one feature per retry: anchors first, then scid_privacy, then the
// bare static_remote_key default.
6191 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6192 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
// Without anchors the commitment feerate target changes, so re-fetch it.
6193 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6194 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6195 } else if self.context.channel_type.supports_scid_privacy() {
6196 self.context.channel_type.clear_scid_privacy();
6198 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
// Keep the transaction parameters in sync with the downgraded channel type,
// then re-issue open_channel with the reduced feature set.
6200 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6201 Ok(self.get_open_channel(chain_hash))
// Builds the `open_channel` message announcing this channel to the
// counterparty. Panics if called on an inbound channel, after the handshake
// has progressed past `OurInitSent`, or after commitment numbers advanced.
6204 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6205 if !self.context.is_outbound() {
6206 panic!("Tried to open a channel for an inbound channel?");
6208 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6209 panic!("Cannot generate an open_channel after we've moved forward");
6212 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6213 panic!("Tried to send an open_channel for a channel that has already advanced");
6216 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6217 let keys = self.context.get_holder_pubkeys();
// At this point `channel_id` still holds the temporary channel id.
6221 temporary_channel_id: self.context.channel_id,
6222 funding_satoshis: self.context.channel_value_satoshis,
// The pushed amount is whatever portion of the channel value is not ours.
6223 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6224 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6225 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6226 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6227 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6228 feerate_per_kw: self.context.feerate_per_kw as u32,
6229 to_self_delay: self.context.get_holder_selected_contest_delay(),
6230 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6231 funding_pubkey: keys.funding_pubkey,
6232 revocation_basepoint: keys.revocation_basepoint,
6233 payment_point: keys.payment_point,
6234 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6235 htlc_basepoint: keys.htlc_basepoint,
6236 first_per_commitment_point,
// Bit 0 of channel_flags signals whether we want the channel announced.
6237 channel_flags: if self.context.config.announced_channel {1} else {0},
// An empty script here is the standard opt-out for upfront shutdown.
6238 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6239 Some(script) => script.clone().into_inner(),
6240 None => Builder::new().into_script(),
6242 channel_type: Some(self.context.channel_type.clone()),
// Handles the counterparty's `accept_channel`: validates every field against
// protocol requirements and our (possibly overridden) handshake limits,
// records the counterparty's parameters and keys, and advances the handshake
// state to OurInitSent | TheirInitSent. Errors are `ChannelError::Close`.
6247 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Prefer the per-channel handshake-limits override captured at open time.
6248 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6250 // Check sanity of message fields:
6251 if !self.context.is_outbound() {
6252 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6254 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6255 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
// 21M BTC * 100M sat/BTC: a dust limit above total supply is nonsense.
6257 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6258 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6260 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6261 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6263 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6264 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6266 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6267 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6268 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
// htlc_minimum must leave room for at least one HTLC below the spendable value.
6270 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6271 if msg.htlc_minimum_msat >= full_channel_value_msat {
6272 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6274 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6275 if msg.to_self_delay > max_delay_acceptable {
6276 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6278 if msg.max_accepted_htlcs < 1 {
6279 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6281 if msg.max_accepted_htlcs > MAX_HTLCS {
6282 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6285 // Now check against optional parameters as set by config...
6286 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6287 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6289 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6290 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6292 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6293 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6295 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6296 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6298 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6299 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6301 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6302 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6304 if msg.minimum_depth > peer_limits.max_minimum_depth {
6305 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
// If they echoed a channel_type it must match exactly what we proposed.
6308 if let Some(ty) = &msg.channel_type {
6309 if *ty != self.context.channel_type {
6310 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6312 } else if their_features.supports_channel_type() {
6313 // Assume they've accepted the channel type as they said they understand it.
// No channel_type negotiation: fall back to the implicit type derived from
// their init features, which must be plain static_remote_key.
6315 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6316 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6317 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6319 self.context.channel_type = channel_type.clone();
6320 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6323 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6324 match &msg.shutdown_scriptpubkey {
6325 &Some(ref script) => {
6326 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6327 if script.len() == 0 {
6330 if !script::is_bolt2_compliant(&script, their_features) {
6331 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6333 Some(script.clone())
6336 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6338 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed: record the counterparty's negotiated parameters.
6343 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
// Cap their in-flight limit at the full channel value.
6344 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6345 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6346 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6347 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
// 0-conf is only allowed when the user opted to trust their own funding.
6349 if peer_limits.trust_own_funding_0conf {
6350 self.context.minimum_depth = Some(msg.minimum_depth);
6352 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6355 let counterparty_pubkeys = ChannelPublicKeys {
6356 funding_pubkey: msg.funding_pubkey,
6357 revocation_basepoint: msg.revocation_basepoint,
6358 payment_point: msg.payment_point,
6359 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6360 htlc_basepoint: msg.htlc_basepoint
6363 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6364 selected_contest_delay: msg.to_self_delay,
6365 pubkeys: counterparty_pubkeys,
6368 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6369 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
// Handshake complete from our side; next step is funding creation.
6371 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6372 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6378 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6379 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Channel state shared with funded channels (keys, limits, HTLC tracking, ...).
6380 pub context: ChannelContext<SP>,
// State specific to not-yet-funded channels.
6381 pub unfunded_context: UnfundedChannelContext,
6384 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6385 /// Creates a new channel from a remote sides' request for one.
6386 /// Assumes chain_hash has already been checked and corresponds with what we expect!
// NOTE(review): this constructor continues past the end of this view; the
// visible portion is the parameter validation performed on the peer's
// `open_channel` before any state is constructed. Errors are
// `ChannelError::Close`. Code lines are kept byte-identical.
6387 pub fn new<ES: Deref, F: Deref, L: Deref>(
6388 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6389 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6390 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6391 current_chain_height: u32, logger: &L, is_0conf: bool,
6392 ) -> Result<InboundV1Channel<SP>, ChannelError>
6393 where ES::Target: EntropySource,
6394 F::Target: FeeEstimator,
// Bit 0 of channel_flags signals whether the peer wants the channel announced.
6397 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6399 // First check the channel type is known, failing before we do anything else if we don't
6400 // support this channel type.
6401 let channel_type = if let Some(channel_type) = &msg.channel_type {
6402 if channel_type.supports_any_optional_bits() {
6403 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6406 // We only support the channel types defined by the `ChannelManager` in
6407 // `provided_channel_type_features`. The channel type must always support
6408 // `static_remote_key`.
6409 if !channel_type.requires_static_remote_key() {
6410 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6412 // Make sure we support all of the features behind the channel type.
6413 if !channel_type.is_subset(our_supported_features) {
6414 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6416 if channel_type.requires_scid_privacy() && announced_channel {
6417 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6419 channel_type.clone()
// No explicit channel_type: derive the implicit type from their init
// features; only plain static_remote_key is accepted in that case.
6421 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6422 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6423 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
// `true` here marks the keys as belonging to an inbound channel.
6428 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6429 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6430 let pubkeys = holder_signer.pubkeys().clone();
6431 let counterparty_pubkeys = ChannelPublicKeys {
6432 funding_pubkey: msg.funding_pubkey,
6433 revocation_basepoint: msg.revocation_basepoint,
6434 payment_point: msg.payment_point,
6435 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6436 htlc_basepoint: msg.htlc_basepoint
// Refuse to accept any channel if our own contest delay is unsafely short.
6439 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6440 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6443 // Check sanity of message fields:
6444 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6445 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6447 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6448 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6450 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6451 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
// Value actually spendable once the peer's chosen reserve is excluded.
6453 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6454 if msg.push_msat > full_channel_value_msat {
6455 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6457 if msg.dust_limit_satoshis > msg.funding_satoshis {
6458 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6460 if msg.htlc_minimum_msat >= full_channel_value_msat {
6461 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
// Validate the proposed commitment feerate against our fee estimator.
6463 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6465 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6466 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6467 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6469 if msg.max_accepted_htlcs < 1 {
6470 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6472 if msg.max_accepted_htlcs > MAX_HTLCS {
6473 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6476 // Now check against optional parameters as set by config...
6477 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6478 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6480 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6481 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6483 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6484 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6486 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6487 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6489 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6490 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6492 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6493 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6495 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6496 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6499 // Convert things into internal flags and prep our state:
6501 if config.channel_handshake_limits.force_announced_channel_preference {
6502 if config.channel_handshake_config.announced_channel != announced_channel {
6503 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6507 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6508 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6509 // Protocol level safety check in place, although it should never happen because
6510 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6511 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6513 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6514 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
// A counterparty reserve below our dust limit is insecure *for them* (we
// could broadcast stale states risk-free); log it but do not fail.
6516 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6517 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6518 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6520 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6521 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6524 // check if the funder's amount for the initial commitment tx is sufficient
6525 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6526 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6527 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6531 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6532 let commitment_tx_fee = commit_tx_fee_sat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6533 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6534 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6537 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6538 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6539 // want to push much to us), our counterparty should always have more than our reserve.
6540 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6541 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6544 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6545 match &msg.shutdown_scriptpubkey {
6546 &Some(ref script) => {
6547 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6548 if script.len() == 0 {
6551 if !script::is_bolt2_compliant(&script, their_features) {
6552 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6554 Some(script.clone())
6557 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6559 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6564 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6565 match signer_provider.get_shutdown_scriptpubkey() {
6566 Ok(scriptpubkey) => Some(scriptpubkey),
6567 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6571 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6572 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6573 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6577 let destination_script = match signer_provider.get_destination_script() {
6578 Ok(script) => script,
6579 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6582 let mut secp_ctx = Secp256k1::new();
6583 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6585 let minimum_depth = if is_0conf {
6588 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6592 context: ChannelContext {
6595 config: LegacyChannelConfig {
6596 options: config.channel_config.clone(),
6598 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6603 inbound_handshake_limits_override: None,
6605 temporary_channel_id: Some(msg.temporary_channel_id),
6606 channel_id: msg.temporary_channel_id,
6607 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6608 announcement_sigs_state: AnnouncementSigsState::NotSent,
6611 latest_monitor_update_id: 0,
6613 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6614 shutdown_scriptpubkey,
6617 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6618 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6619 value_to_self_msat: msg.push_msat,
6621 pending_inbound_htlcs: Vec::new(),
6622 pending_outbound_htlcs: Vec::new(),
6623 holding_cell_htlc_updates: Vec::new(),
6624 pending_update_fee: None,
6625 holding_cell_update_fee: None,
6626 next_holder_htlc_id: 0,
6627 next_counterparty_htlc_id: 0,
6628 update_time_counter: 1,
6630 resend_order: RAACommitmentOrder::CommitmentFirst,
6632 monitor_pending_channel_ready: false,
6633 monitor_pending_revoke_and_ack: false,
6634 monitor_pending_commitment_signed: false,
6635 monitor_pending_forwards: Vec::new(),
6636 monitor_pending_failures: Vec::new(),
6637 monitor_pending_finalized_fulfills: Vec::new(),
6639 signer_pending_commitment_update: false,
6640 signer_pending_funding: false,
6642 #[cfg(debug_assertions)]
6643 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6644 #[cfg(debug_assertions)]
6645 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6647 last_sent_closing_fee: None,
6648 pending_counterparty_closing_signed: None,
6649 expecting_peer_commitment_signed: false,
6650 closing_fee_limits: None,
6651 target_closing_feerate_sats_per_kw: None,
6653 funding_tx_confirmed_in: None,
6654 funding_tx_confirmation_height: 0,
6655 short_channel_id: None,
6656 channel_creation_height: current_chain_height,
6658 feerate_per_kw: msg.feerate_per_kw,
6659 channel_value_satoshis: msg.funding_satoshis,
6660 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6661 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6662 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6663 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6664 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6665 holder_selected_channel_reserve_satoshis,
6666 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6667 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6668 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6669 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6672 counterparty_forwarding_info: None,
6674 channel_transaction_parameters: ChannelTransactionParameters {
6675 holder_pubkeys: pubkeys,
6676 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6677 is_outbound_from_holder: false,
6678 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6679 selected_contest_delay: msg.to_self_delay,
6680 pubkeys: counterparty_pubkeys,
6682 funding_outpoint: None,
6683 channel_type_features: channel_type.clone()
6685 funding_transaction: None,
6686 is_batch_funding: None,
6688 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6689 counterparty_prev_commitment_point: None,
6690 counterparty_node_id,
6692 counterparty_shutdown_scriptpubkey,
6694 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6696 channel_update_status: ChannelUpdateStatus::Enabled,
6697 closing_signed_in_flight: false,
6699 announcement_sigs: None,
6701 #[cfg(any(test, fuzzing))]
6702 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6703 #[cfg(any(test, fuzzing))]
6704 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6706 workaround_lnd_bug_4006: None,
6707 sent_message_awaiting_response: None,
6709 latest_inbound_scid_alias: None,
6710 outbound_scid_alias: 0,
6712 channel_pending_event_emitted: false,
6713 channel_ready_event_emitted: false,
6715 #[cfg(any(test, fuzzing))]
6716 historical_inbound_htlc_fulfills: HashSet::new(),
6721 blocked_monitor_updates: Vec::new(),
6723 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6729 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6730 /// should be sent back to the counterparty node.
6732 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6733 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
// NOTE(review): this excerpt appears to have some lines (e.g. closing braces) elided.
// Accepting is only meaningful for an inbound channel; reaching here on an outbound
// channel is a local logic error, hence panic rather than a recoverable error.
6734 if self.context.is_outbound() {
6735 panic!("Tried to send accept_channel for an outbound channel?");
// accept_channel may only be sent while the channel state is exactly
// OurInitSent | TheirInitSent, i.e. init exchanged but nothing further has happened.
6737 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6738 panic!("Tried to send accept_channel after channel had moved forward");
// The holder commitment number must still be at its initial value; any advance implies
// commitment transactions have already been exchanged.
6740 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6741 panic!("Tried to send an accept_channel for a channel that has already advanced");
// All sanity checks passed -- build the actual accept_channel message.
6744 self.generate_accept_channel_message()
6747 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6748 /// inbound channel. If the intention is to accept an inbound channel, use
6749 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6751 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6752 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
// Ask our signer for the per-commitment point of the initial holder commitment, which the
// counterparty needs to build our first commitment transaction.
6753 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6754 let keys = self.context.get_holder_pubkeys();
// Echo back our own channel limits and basepoints, mirroring the fields of the
// counterparty's open_channel.
6756 msgs::AcceptChannel {
6757 temporary_channel_id: self.context.channel_id,
6758 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6759 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6760 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6761 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6762 minimum_depth: self.context.minimum_depth.unwrap(),
6763 to_self_delay: self.context.get_holder_selected_contest_delay(),
6764 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6765 funding_pubkey: keys.funding_pubkey,
6766 revocation_basepoint: keys.revocation_basepoint,
6767 payment_point: keys.payment_point,
6768 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6769 htlc_basepoint: keys.htlc_basepoint,
6770 first_per_commitment_point,
// If we have no upfront shutdown script, send a zero-length script, which signals
// opting out of upfront_shutdown (see the mirror-image check on the inbound path).
6771 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6772 Some(script) => script.clone().into_inner(),
6773 None => Builder::new().into_script(),
6775 channel_type: Some(self.context.channel_type.clone()),
// NOTE(review): presumably a taproot-channel field, always None for ECDSA channels
// here -- confirm against the msgs::AcceptChannel definition.
6777 next_local_nonce: None,
6781 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6782 /// inbound channel without accepting it.
6784 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
// NOTE(review): a cfg attribute restricting this to test builds appears to be elided in
// this excerpt (the doc comment says this exists for tests) -- confirm upstream.
6786 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
// Thin wrapper: build the message without running the state checks that
// accept_inbound_channel performs.
6787 self.generate_accept_channel_message()
/// Builds our initial holder commitment transaction and verifies the counterparty's
/// funding_created signature over it, returning the commitment transaction on success or a
/// `ChannelError` (via `secp_check!`) if the signature is invalid.
6790 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6791 let funding_script = self.context.get_funding_redeemscript();
// Derive the keys for our initial (current) holder commitment and build the transaction
// the counterparty is supposed to have signed.
6793 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6794 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6795 let trusted_tx = initial_commitment_tx.trust();
6796 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
// SIGHASH_ALL digest over the funding output being spent.
6797 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6798 // They sign the holder commitment transaction...
6799 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6800 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6801 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6802 encode::serialize_hex(&funding_script), &self.context.channel_id());
// secp_check! converts a secp256k1 verification failure into a ChannelError with the
// given message, aborting the channel open.
6803 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6805 Ok(initial_commitment_tx)
/// Handles the counterparty's `funding_created` message: verifies their signature on our
/// initial commitment transaction, promotes this pre-funding channel into a full
/// [`Channel`], and produces the `ChannelMonitor` plus (optionally) our `funding_signed`
/// reply. On failure, returns `self` back to the caller together with the error so the
/// channel object is not lost.
///
/// NOTE(review): this excerpt has interior lines elided (closing braces, some match arms
/// and constructor arguments); comments below describe only what is visible.
6808 pub fn funding_created<L: Deref>(
6809 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6810 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
// funding_created only makes sense on an inbound channel: the *counterparty* funds.
6814 if self.context.is_outbound() {
6815 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
// Must still be in the pure init-exchanged state; anything further means we already
// processed funding for this channel.
6817 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6818 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6819 // remember the channel, so it's safe to just send an error_message here and drop the
6821 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
// Internal invariants: no revocation secrets seen (min-seen is the 1 << 48 sentinel) and
// both commitment numbers untouched. Violation is a local bug -> panic.
6823 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6824 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6825 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6826 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
// Record the funding outpoint the counterparty told us about and hand the completed
// channel parameters to our signer.
6829 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6830 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6831 // This is an externally observable change before we finish all our checks. In particular
6832 // check_funding_created_signature may fail.
6833 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
// Verify their signature; if invalid, roll back the funding outpoint so the channel
// returns to its pre-funding state before handing it back with the error.
6835 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
6837 Err(ChannelError::Close(e)) => {
6838 self.context.channel_transaction_parameters.funding_outpoint = None;
6839 return Err((self, ChannelError::Close(e)));
6842 // The only error we know how to handle is ChannelError::Close, so we fall over here
6843 // to make sure we don't continue with an inconsistent state.
6844 panic!("unexpected error type from check_funding_created_signature {:?}", e);
// Wrap the verified commitment tx (some constructor arguments elided in this excerpt).
6848 let holder_commitment_tx = HolderCommitmentTransaction::new(
6849 initial_commitment_tx,
6852 &self.context.get_holder_pubkeys().funding_pubkey,
6853 self.context.counterparty_funding_pubkey()
// Give our signer a chance to reject the commitment before we commit any state.
6856 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6857 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6860 // Now that we're past error-generating stuff, update our local state:
6862 self.context.channel_state = ChannelState::FundingSent as u32;
// The real channel_id is derived from the funding outpoint, replacing the temporary id.
6863 self.context.channel_id = funding_txo.to_channel_id();
6864 self.context.cur_counterparty_commitment_transaction_number -= 1;
6865 self.context.cur_holder_commitment_transaction_number -= 1;
// Build the counterparty's initial commitment tx and, if the signer is ready, our
// funding_signed message (None means we're still waiting on an async signature).
6867 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6869 let funding_redeemscript = self.context.get_funding_redeemscript();
6870 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
// Obscure factor used for commitment-number obfuscation; consumed by the monitor
// constructor (whose full argument list is partially elided here).
6871 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6872 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
// The monitor gets its own signer instance, derived from the same channel keys id.
6873 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6874 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6875 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6876 shutdown_script, self.context.get_holder_selected_contest_delay(),
6877 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6878 &self.context.channel_transaction_parameters,
6879 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6881 holder_commitment_tx, best_block, self.context.counterparty_node_id);
// Seed the monitor with the counterparty's initial commitment tx so it can react if
// that transaction ever hits the chain. Note the +1: we already decremented above.
6883 channel_monitor.provide_initial_counterparty_commitment_tx(
6884 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6885 self.context.cur_counterparty_commitment_transaction_number + 1,
6886 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6887 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6888 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6890 log_info!(logger, "{} funding_signed for peer for channel {}",
6891 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
6893 // Promote the channel to a full-fledged one now that we have updated the state and have a
6894 // `ChannelMonitor`.
6895 let mut channel = Channel {
6896 context: self.context,
// check_get_channel_ready(0): with 0conf a channel_ready may be due immediately.
6898 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6899 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6901 Ok((channel, funding_signed, channel_monitor))
// Version written at the head of serialized channels, and the minimum version we can read
// back (see the write_ver_prefix! call in the Channel Writeable impl below).
6905 const SERIALIZATION_VERSION: u8 = 3;
6906 const MIN_SERIALIZATION_VERSION: u8 = 3;
// Derives Writeable/Readable for InboundHTLCRemovalReason via the TLV-based enum macro.
// NOTE(review): the variant list of this macro invocation appears to be elided in this
// excerpt -- confirm against the full file.
6908 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
// Serializes ChannelUpdateStatus as a single byte: 0 for (effectively) enabled, 1 for
// (effectively) disabled. Staged transitions collapse to the most recently *announced*
// state. NOTE(review): the `match` header and trailing `Ok(())`/closing braces appear to
// be elided in this excerpt.
6914 impl Writeable for ChannelUpdateStatus {
6915 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6916 // We only care about writing out the current state as it was announced, ie only either
6917 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6918 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6920 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6921 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6922 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6923 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
// Inverse of the Writeable impl above: only the two announced states (Enabled/Disabled)
// are ever written, so only those two bytes are valid on read; anything else is a
// deserialization error. NOTE(review): closing braces appear to be elided in this excerpt.
6929 impl Readable for ChannelUpdateStatus {
6930 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6931 Ok(match <u8 as Readable>::read(reader)? {
6932 0 => ChannelUpdateStatus::Enabled,
6933 1 => ChannelUpdateStatus::Disabled,
6934 _ => return Err(DecodeError::InvalidValue),
// Serializes AnnouncementSigsState as a single byte, collapsing to the state we'd be in
// after a disconnect: everything except PeerReceived maps back to NotSent (0).
// NOTE(review): the `match` header and closing braces appear to be elided in this excerpt.
6939 impl Writeable for AnnouncementSigsState {
6940 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6941 // We only care about writing out the current state as if we had just disconnected, at
6942 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6944 AnnouncementSigsState::NotSent => 0u8.write(writer),
6945 AnnouncementSigsState::MessageSent => 0u8.write(writer),
6946 AnnouncementSigsState::Committed => 0u8.write(writer),
6947 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
// Inverse of the Writeable impl above: 0 -> NotSent, 1 -> PeerReceived; any other byte is
// invalid since only those two values are ever written. NOTE(review): closing braces
// appear to be elided in this excerpt.
6952 impl Readable for AnnouncementSigsState {
6953 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6954 Ok(match <u8 as Readable>::read(reader)? {
6955 0 => AnnouncementSigsState::NotSent,
6956 1 => AnnouncementSigsState::PeerReceived,
6957 _ => return Err(DecodeError::InvalidValue),
6962 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
6963 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6964 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6967 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6969 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6970 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6971 // the low bytes now and the optional high bytes later.
6972 let user_id_low = self.context.user_id as u64;
6973 user_id_low.write(writer)?;
6975 // Version 1 deserializers expected to read parts of the config object here. Version 2
6976 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6977 // `minimum_depth` we simply write dummy values here.
6978 writer.write_all(&[0; 8])?;
6980 self.context.channel_id.write(writer)?;
6981 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6982 self.context.channel_value_satoshis.write(writer)?;
6984 self.context.latest_monitor_update_id.write(writer)?;
6986 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6987 // deserialized from that format.
6988 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6989 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6990 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6992 self.context.destination_script.write(writer)?;
6994 self.context.cur_holder_commitment_transaction_number.write(writer)?;
6995 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
6996 self.context.value_to_self_msat.write(writer)?;
6998 let mut dropped_inbound_htlcs = 0;
6999 for htlc in self.context.pending_inbound_htlcs.iter() {
7000 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7001 dropped_inbound_htlcs += 1;
7004 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7005 for htlc in self.context.pending_inbound_htlcs.iter() {
7006 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7009 htlc.htlc_id.write(writer)?;
7010 htlc.amount_msat.write(writer)?;
7011 htlc.cltv_expiry.write(writer)?;
7012 htlc.payment_hash.write(writer)?;
7014 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7015 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7017 htlc_state.write(writer)?;
7019 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7021 htlc_state.write(writer)?;
7023 &InboundHTLCState::Committed => {
7026 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7028 removal_reason.write(writer)?;
7033 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7034 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7036 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7037 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7038 htlc.htlc_id.write(writer)?;
7039 htlc.amount_msat.write(writer)?;
7040 htlc.cltv_expiry.write(writer)?;
7041 htlc.payment_hash.write(writer)?;
7042 htlc.source.write(writer)?;
7044 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7046 onion_packet.write(writer)?;
7048 &OutboundHTLCState::Committed => {
7051 &OutboundHTLCState::RemoteRemoved(_) => {
7052 // Treat this as a Committed because we haven't received the CS - they'll
7053 // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
7056 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7058 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7059 preimages.push(preimage);
7061 let reason: Option<&HTLCFailReason> = outcome.into();
7062 reason.write(writer)?;
7064 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7066 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7067 preimages.push(preimage);
7069 let reason: Option<&HTLCFailReason> = outcome.into();
7070 reason.write(writer)?;
7073 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7074 if pending_outbound_skimmed_fees.is_empty() {
7075 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7077 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7078 } else if !pending_outbound_skimmed_fees.is_empty() {
7079 pending_outbound_skimmed_fees.push(None);
7083 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7084 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7085 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7087 &HTLCUpdateAwaitingACK::AddHTLC {
7088 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7092 amount_msat.write(writer)?;
7093 cltv_expiry.write(writer)?;
7094 payment_hash.write(writer)?;
7095 source.write(writer)?;
7096 onion_routing_packet.write(writer)?;
7098 if let Some(skimmed_fee) = skimmed_fee_msat {
7099 if holding_cell_skimmed_fees.is_empty() {
7100 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7102 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7103 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7105 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7107 payment_preimage.write(writer)?;
7108 htlc_id.write(writer)?;
7110 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7112 htlc_id.write(writer)?;
7113 err_packet.write(writer)?;
7118 match self.context.resend_order {
7119 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7120 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7123 self.context.monitor_pending_channel_ready.write(writer)?;
7124 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7125 self.context.monitor_pending_commitment_signed.write(writer)?;
7127 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7128 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7129 pending_forward.write(writer)?;
7130 htlc_id.write(writer)?;
7133 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7134 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7135 htlc_source.write(writer)?;
7136 payment_hash.write(writer)?;
7137 fail_reason.write(writer)?;
7140 if self.context.is_outbound() {
7141 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7142 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7143 Some(feerate).write(writer)?;
7145 // As for inbound HTLCs, if the update was only announced and never committed in a
7146 // commitment_signed, drop it.
7147 None::<u32>.write(writer)?;
7149 self.context.holding_cell_update_fee.write(writer)?;
7151 self.context.next_holder_htlc_id.write(writer)?;
7152 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7153 self.context.update_time_counter.write(writer)?;
7154 self.context.feerate_per_kw.write(writer)?;
7156 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7157 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7158 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7159 // consider the stale state on reload.
7162 self.context.funding_tx_confirmed_in.write(writer)?;
7163 self.context.funding_tx_confirmation_height.write(writer)?;
7164 self.context.short_channel_id.write(writer)?;
7166 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7167 self.context.holder_dust_limit_satoshis.write(writer)?;
7168 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7170 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7171 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7173 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7174 self.context.holder_htlc_minimum_msat.write(writer)?;
7175 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7177 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7178 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7180 match &self.context.counterparty_forwarding_info {
7183 info.fee_base_msat.write(writer)?;
7184 info.fee_proportional_millionths.write(writer)?;
7185 info.cltv_expiry_delta.write(writer)?;
7187 None => 0u8.write(writer)?
7190 self.context.channel_transaction_parameters.write(writer)?;
7191 self.context.funding_transaction.write(writer)?;
7193 self.context.counterparty_cur_commitment_point.write(writer)?;
7194 self.context.counterparty_prev_commitment_point.write(writer)?;
7195 self.context.counterparty_node_id.write(writer)?;
7197 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7199 self.context.commitment_secrets.write(writer)?;
7201 self.context.channel_update_status.write(writer)?;
7203 #[cfg(any(test, fuzzing))]
7204 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7205 #[cfg(any(test, fuzzing))]
7206 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7207 htlc.write(writer)?;
7210 // If the channel type is something other than only-static-remote-key, then we need to have
7211 // older clients fail to deserialize this channel at all. If the type is
7212 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7214 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7215 Some(&self.context.channel_type) } else { None };
7217 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7218 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7219 // a different percentage of the channel value then 10%, which older versions of LDK used
7220 // to set it to before the percentage was made configurable.
7221 let serialized_holder_selected_reserve =
7222 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7223 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7225 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7226 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7227 let serialized_holder_htlc_max_in_flight =
7228 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7229 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7231 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7232 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7234 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7235 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7236 // we write the high bytes as an option here.
7237 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7239 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7241 write_tlv_fields!(writer, {
7242 (0, self.context.announcement_sigs, option),
7243 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7244 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7245 // them twice, once with their original default values above, and once as an option
7246 // here. On the read side, old versions will simply ignore the odd-type entries here,
7247 // and new versions map the default values to None and allow the TLV entries here to
7249 (1, self.context.minimum_depth, option),
7250 (2, chan_type, option),
7251 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7252 (4, serialized_holder_selected_reserve, option),
7253 (5, self.context.config, required),
7254 (6, serialized_holder_htlc_max_in_flight, option),
7255 (7, self.context.shutdown_scriptpubkey, option),
7256 (8, self.context.blocked_monitor_updates, optional_vec),
7257 (9, self.context.target_closing_feerate_sats_per_kw, option),
7258 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7259 (13, self.context.channel_creation_height, required),
7260 (15, preimages, required_vec),
7261 (17, self.context.announcement_sigs_state, required),
7262 (19, self.context.latest_inbound_scid_alias, option),
7263 (21, self.context.outbound_scid_alias, required),
7264 (23, channel_ready_event_emitted, option),
7265 (25, user_id_high_opt, option),
7266 (27, self.context.channel_keys_id, required),
7267 (28, holder_max_accepted_htlcs, option),
7268 (29, self.context.temporary_channel_id, option),
7269 (31, channel_pending_event_emitted, option),
7270 (35, pending_outbound_skimmed_fees, optional_vec),
7271 (37, holding_cell_skimmed_fees, optional_vec),
7272 (38, self.context.is_batch_funding, option),
// Cap on the up-front buffer allocation used when reading length-prefixed
// fields during deserialization (e.g. the serialized signer bytes below), so a
// corrupted length can't trigger a multi-gigabyte allocation.
7279 const MAX_ALLOC_SIZE: usize = 64*1024;
// Deserializes a `Channel` written by the `Writeable` impl above. The args are:
// an entropy source, the signer provider, the best-block height at the time of
// deserialization (used as the default `channel_creation_height` for channels
// written before that field existed), and the channel type features we support
// (channels negotiated with unknown required features are rejected).
7280 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7282 ES::Target: EntropySource,
7283 SP::Target: SignerProvider
7285 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7286 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7287 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7289 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7290 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7291 // the low bytes now and the high bytes later.
7292 let user_id_low: u64 = Readable::read(reader)?;
7294 let mut config = Some(LegacyChannelConfig::default());
7296 // Read the old serialization of the ChannelConfig from version 0.0.98.
7297 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7298 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7299 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7300 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7302 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7303 let mut _val: u64 = Readable::read(reader)?;
7306 let channel_id = Readable::read(reader)?;
7307 let channel_state = Readable::read(reader)?;
7308 let channel_value_satoshis = Readable::read(reader)?;
7310 let latest_monitor_update_id = Readable::read(reader)?;
7312 let mut keys_data = None;
7314 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7315 // the `channel_keys_id` TLV is present below.
7316 let keys_len: u32 = Readable::read(reader)?;
// The initial capacity is clamped to MAX_ALLOC_SIZE; beyond that the Vec only
// grows as real bytes actually arrive from the reader.
7317 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7318 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7319 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7320 let mut data = [0; 1024];
7321 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7322 reader.read_exact(read_slice)?;
7323 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7327 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7328 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7329 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7332 let destination_script = Readable::read(reader)?;
7334 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7335 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7336 let value_to_self_msat = Readable::read(reader)?;
7338 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
// Capacity is bounded by DEFAULT_MAX_HTLCS so a corrupt count can't force a
// huge allocation; the loop still reads exactly `count` entries.
7340 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7341 for _ in 0..pending_inbound_htlc_count {
7342 pending_inbound_htlcs.push(InboundHTLCOutput {
7343 htlc_id: Readable::read(reader)?,
7344 amount_msat: Readable::read(reader)?,
7345 cltv_expiry: Readable::read(reader)?,
7346 payment_hash: Readable::read(reader)?,
7347 state: match <u8 as Readable>::read(reader)? {
7348 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7349 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7350 3 => InboundHTLCState::Committed,
7351 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7352 _ => return Err(DecodeError::InvalidValue),
7357 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7358 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7359 for _ in 0..pending_outbound_htlc_count {
7360 pending_outbound_htlcs.push(OutboundHTLCOutput {
7361 htlc_id: Readable::read(reader)?,
7362 amount_msat: Readable::read(reader)?,
7363 cltv_expiry: Readable::read(reader)?,
7364 payment_hash: Readable::read(reader)?,
7365 source: Readable::read(reader)?,
7366 state: match <u8 as Readable>::read(reader)? {
7367 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7368 1 => OutboundHTLCState::Committed,
7370 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7371 OutboundHTLCState::RemoteRemoved(option.into())
7374 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7375 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7378 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7379 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7381 _ => return Err(DecodeError::InvalidValue),
// Skimmed fees were added later; the real values (if any) are restored from
// the `pending_outbound_skimmed_fees_opt` TLV below.
7383 skimmed_fee_msat: None,
7387 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7388 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7389 for _ in 0..holding_cell_htlc_update_count {
7390 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7391 0 => HTLCUpdateAwaitingACK::AddHTLC {
7392 amount_msat: Readable::read(reader)?,
7393 cltv_expiry: Readable::read(reader)?,
7394 payment_hash: Readable::read(reader)?,
7395 source: Readable::read(reader)?,
7396 onion_routing_packet: Readable::read(reader)?,
// As above, restored from the `holding_cell_skimmed_fees_opt` TLV below.
7397 skimmed_fee_msat: None,
7399 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7400 payment_preimage: Readable::read(reader)?,
7401 htlc_id: Readable::read(reader)?,
7403 2 => HTLCUpdateAwaitingACK::FailHTLC {
7404 htlc_id: Readable::read(reader)?,
7405 err_packet: Readable::read(reader)?,
7407 _ => return Err(DecodeError::InvalidValue),
7411 let resend_order = match <u8 as Readable>::read(reader)? {
7412 0 => RAACommitmentOrder::CommitmentFirst,
7413 1 => RAACommitmentOrder::RevokeAndACKFirst,
7414 _ => return Err(DecodeError::InvalidValue),
7417 let monitor_pending_channel_ready = Readable::read(reader)?;
7418 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7419 let monitor_pending_commitment_signed = Readable::read(reader)?;
7421 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7422 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7423 for _ in 0..monitor_pending_forwards_count {
7424 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7427 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7428 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7429 for _ in 0..monitor_pending_failures_count {
7430 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
// Only the feerate is serialized; whether the update was outbound or awaiting
// a remote revoke is reconstructed below from `is_outbound_from_holder`.
7433 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7435 let holding_cell_update_fee = Readable::read(reader)?;
7437 let next_holder_htlc_id = Readable::read(reader)?;
7438 let next_counterparty_htlc_id = Readable::read(reader)?;
7439 let update_time_counter = Readable::read(reader)?;
7440 let feerate_per_kw = Readable::read(reader)?;
7442 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7443 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7444 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7445 // consider the stale state on reload.
7446 match <u8 as Readable>::read(reader)? {
7449 let _: u32 = Readable::read(reader)?;
7450 let _: u64 = Readable::read(reader)?;
7451 let _: Signature = Readable::read(reader)?;
7453 _ => return Err(DecodeError::InvalidValue),
7456 let funding_tx_confirmed_in = Readable::read(reader)?;
7457 let funding_tx_confirmation_height = Readable::read(reader)?;
7458 let short_channel_id = Readable::read(reader)?;
7460 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7461 let holder_dust_limit_satoshis = Readable::read(reader)?;
7462 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7463 let mut counterparty_selected_channel_reserve_satoshis = None;
7465 // Read the old serialization from version 0.0.98.
7466 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7468 // Read the 8 bytes of backwards-compatibility data.
7469 let _dummy: u64 = Readable::read(reader)?;
7471 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7472 let holder_htlc_minimum_msat = Readable::read(reader)?;
7473 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7475 let mut minimum_depth = None;
7477 // Read the old serialization from version 0.0.98.
7478 minimum_depth = Some(Readable::read(reader)?);
7480 // Read the 4 bytes of backwards-compatibility data.
7481 let _dummy: u32 = Readable::read(reader)?;
7484 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7486 1 => Some(CounterpartyForwardingInfo {
7487 fee_base_msat: Readable::read(reader)?,
7488 fee_proportional_millionths: Readable::read(reader)?,
7489 cltv_expiry_delta: Readable::read(reader)?,
7491 _ => return Err(DecodeError::InvalidValue),
// Mutable because `channel_type_features` is overridden below once the final
// (possibly TLV-provided) channel type is known.
7494 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7495 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7497 let counterparty_cur_commitment_point = Readable::read(reader)?;
7499 let counterparty_prev_commitment_point = Readable::read(reader)?;
7500 let counterparty_node_id = Readable::read(reader)?;
7502 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7503 let commitment_secrets = Readable::read(reader)?;
7505 let channel_update_status = Readable::read(reader)?;
7507 #[cfg(any(test, fuzzing))]
7508 let mut historical_inbound_htlc_fulfills = HashSet::new();
7509 #[cfg(any(test, fuzzing))]
7511 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7512 for _ in 0..htlc_fulfills_len {
// A duplicate entry in the serialized set indicates corruption; this assert
// only exists in test/fuzzing builds.
7513 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7517 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7518 Some((feerate, if channel_parameters.is_outbound_from_holder {
7519 FeeUpdateState::Outbound
7521 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
// Defaults for the TLV-carried fields, matching what older serialization
// versions implied; `read_tlv_fields!` below overwrites any that are present.
7527 let mut announcement_sigs = None;
7528 let mut target_closing_feerate_sats_per_kw = None;
7529 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7530 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7531 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7532 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7533 // only, so we default to that if none was written.
7534 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7535 let mut channel_creation_height = Some(serialized_height);
7536 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7538 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7539 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7540 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7541 let mut latest_inbound_scid_alias = None;
7542 let mut outbound_scid_alias = None;
7543 let mut channel_pending_event_emitted = None;
7544 let mut channel_ready_event_emitted = None;
7546 let mut user_id_high_opt: Option<u64> = None;
7547 let mut channel_keys_id: Option<[u8; 32]> = None;
7548 let mut temporary_channel_id: Option<ChannelId> = None;
7549 let mut holder_max_accepted_htlcs: Option<u16> = None;
7551 let mut blocked_monitor_updates = Some(Vec::new());
7553 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7554 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7556 let mut is_batch_funding: Option<()> = None;
// The TLV type numbers here must mirror the `write_tlv_fields!` call in the
// `Writeable` impl above.
7558 read_tlv_fields!(reader, {
7559 (0, announcement_sigs, option),
7560 (1, minimum_depth, option),
7561 (2, channel_type, option),
7562 (3, counterparty_selected_channel_reserve_satoshis, option),
7563 (4, holder_selected_channel_reserve_satoshis, option),
7564 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7565 (6, holder_max_htlc_value_in_flight_msat, option),
7566 (7, shutdown_scriptpubkey, option),
7567 (8, blocked_monitor_updates, optional_vec),
7568 (9, target_closing_feerate_sats_per_kw, option),
7569 (11, monitor_pending_finalized_fulfills, optional_vec),
7570 (13, channel_creation_height, option),
7571 (15, preimages_opt, optional_vec),
7572 (17, announcement_sigs_state, option),
7573 (19, latest_inbound_scid_alias, option),
7574 (21, outbound_scid_alias, option),
7575 (23, channel_ready_event_emitted, option),
7576 (25, user_id_high_opt, option),
7577 (27, channel_keys_id, option),
7578 (28, holder_max_accepted_htlcs, option),
7579 (29, temporary_channel_id, option),
7580 (31, channel_pending_event_emitted, option),
7581 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7582 (37, holding_cell_skimmed_fees_opt, optional_vec),
7583 (38, is_batch_funding, option),
// Prefer deriving the signer from the `channel_keys_id` TLV (written by newer
// versions); otherwise fall back to deserializing the raw signer bytes read
// earlier.
7586 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7587 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7588 // If we've gotten to the funding stage of the channel, populate the signer with its
7589 // required channel parameters.
7590 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7591 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7592 holder_signer.provide_channel_parameters(&channel_parameters);
7594 (channel_keys_id, holder_signer)
7596 // `keys_data` can be `None` if we had corrupted data.
7597 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7598 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7599 (holder_signer.channel_keys_id(), holder_signer)
// Re-attach the claimed-HTLC preimages (TLV type 15) to the outbound HTLCs in
// a resolved-with-success state; the counts must match exactly.
7602 if let Some(preimages) = preimages_opt {
7603 let mut iter = preimages.into_iter();
7604 for htlc in pending_outbound_htlcs.iter_mut() {
7606 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7607 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7609 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7610 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7615 // We expect all preimages to be consumed above
7616 if iter.next().is_some() {
7617 return Err(DecodeError::InvalidValue);
7621 let chan_features = channel_type.as_ref().unwrap();
7622 if !chan_features.is_subset(our_supported_features) {
7623 // If the channel was written by a new version and negotiated with features we don't
7624 // understand yet, refuse to read it.
7625 return Err(DecodeError::UnknownRequiredFeature);
7628 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7629 // To account for that, we're proactively setting/overriding the field here.
7630 channel_parameters.channel_type_features = chan_features.clone();
7632 let mut secp_ctx = Secp256k1::new();
7633 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7635 // `user_id` used to be a single u64 value. In order to remain backwards
7636 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7637 // separate u64 values.
7638 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7640 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
// The skimmed-fee TLVs, when present, must carry exactly one entry per
// corresponding HTLC; any count mismatch means the data is corrupt.
7642 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7643 let mut iter = skimmed_fees.into_iter();
7644 for htlc in pending_outbound_htlcs.iter_mut() {
7645 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7647 // We expect all skimmed fees to be consumed above
7648 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7650 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7651 let mut iter = skimmed_fees.into_iter();
7652 for htlc in holding_cell_htlc_updates.iter_mut() {
7653 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7654 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7657 // We expect all skimmed fees to be consumed above
7658 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
// Assemble the Channel from the fixed-position fields, the TLV-provided values
// (with their legacy defaults), and freshly-initialized runtime-only state.
7662 context: ChannelContext {
7665 config: config.unwrap(),
7669 // Note that we don't care about serializing handshake limits as we only ever serialize
7670 // channel data after the handshake has completed.
7671 inbound_handshake_limits_override: None,
7674 temporary_channel_id,
7676 announcement_sigs_state: announcement_sigs_state.unwrap(),
7678 channel_value_satoshis,
7680 latest_monitor_update_id,
7682 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7683 shutdown_scriptpubkey,
7686 cur_holder_commitment_transaction_number,
7687 cur_counterparty_commitment_transaction_number,
7690 holder_max_accepted_htlcs,
7691 pending_inbound_htlcs,
7692 pending_outbound_htlcs,
7693 holding_cell_htlc_updates,
7697 monitor_pending_channel_ready,
7698 monitor_pending_revoke_and_ack,
7699 monitor_pending_commitment_signed,
7700 monitor_pending_forwards,
7701 monitor_pending_failures,
7702 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7704 signer_pending_commitment_update: false,
7705 signer_pending_funding: false,
7708 holding_cell_update_fee,
7709 next_holder_htlc_id,
7710 next_counterparty_htlc_id,
7711 update_time_counter,
7714 #[cfg(debug_assertions)]
7715 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7716 #[cfg(debug_assertions)]
7717 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7719 last_sent_closing_fee: None,
7720 pending_counterparty_closing_signed: None,
7721 expecting_peer_commitment_signed: false,
7722 closing_fee_limits: None,
7723 target_closing_feerate_sats_per_kw,
7725 funding_tx_confirmed_in,
7726 funding_tx_confirmation_height,
7728 channel_creation_height: channel_creation_height.unwrap(),
7730 counterparty_dust_limit_satoshis,
7731 holder_dust_limit_satoshis,
7732 counterparty_max_htlc_value_in_flight_msat,
7733 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7734 counterparty_selected_channel_reserve_satoshis,
7735 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7736 counterparty_htlc_minimum_msat,
7737 holder_htlc_minimum_msat,
7738 counterparty_max_accepted_htlcs,
7741 counterparty_forwarding_info,
7743 channel_transaction_parameters: channel_parameters,
7744 funding_transaction,
7747 counterparty_cur_commitment_point,
7748 counterparty_prev_commitment_point,
7749 counterparty_node_id,
7751 counterparty_shutdown_scriptpubkey,
7755 channel_update_status,
7756 closing_signed_in_flight: false,
7760 #[cfg(any(test, fuzzing))]
7761 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7762 #[cfg(any(test, fuzzing))]
7763 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7765 workaround_lnd_bug_4006: None,
7766 sent_message_awaiting_response: None,
7768 latest_inbound_scid_alias,
7769 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7770 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7772 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7773 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7775 #[cfg(any(test, fuzzing))]
7776 historical_inbound_htlc_fulfills,
7778 channel_type: channel_type.unwrap(),
7781 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7790 use bitcoin::blockdata::constants::ChainHash;
7791 use bitcoin::blockdata::script::{Script, Builder};
7792 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7793 use bitcoin::blockdata::opcodes;
7794 use bitcoin::network::constants::Network;
7796 use crate::ln::PaymentHash;
7797 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7798 use crate::ln::channel::InitFeatures;
7799 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_sat};
7800 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7801 use crate::ln::features::ChannelTypeFeatures;
7802 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7803 use crate::ln::script::ShutdownScript;
7804 use crate::ln::chan_utils;
7805 use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7806 use crate::chain::BestBlock;
7807 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7808 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7809 use crate::chain::transaction::OutPoint;
7810 use crate::routing::router::Path;
7811 use crate::util::config::UserConfig;
7812 use crate::util::errors::APIError;
7813 use crate::util::test_utils;
7814 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7815 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7816 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7817 use bitcoin::secp256k1::{SecretKey,PublicKey};
7818 use bitcoin::hashes::sha256::Hash as Sha256;
7819 use bitcoin::hashes::Hash;
7820 use bitcoin::hash_types::WPubkeyHash;
7821 use bitcoin::PackedLockTime;
7822 use bitcoin::util::address::WitnessVersion;
7823 use crate::prelude::*;
// Test fee estimator driven by a settable `fee_est` field (see the tests
// below, which construct it with a fixed value and mutate it directly).
// NOTE(review): the getter body is elided here — presumably it returns the
// stored `fee_est`; confirm against the full file.
7825 struct TestFeeEstimator {
7828 impl FeeEstimator for TestFeeEstimator {
7829 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
// Sanity-check the non-wumbo funding cap against the total bitcoin supply.
7835 fn test_max_funding_satoshis_no_wumbo() {
7836 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7837 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7838 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7842 fn test_no_fee_check_overflow() {
7843 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7844 // arithmetic, causing a panic with debug assertions enabled.
7845 let fee_est = TestFeeEstimator { fee_est: 42 };
7846 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
// `u32::max_value()` must yield an `Err` rather than panicking.
7847 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
7848 &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
7849 u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
// The single in-memory signer this test provider hands out for every channel.
7853 signer: InMemorySigner,
// Deterministic "entropy" (all zeros) so tests are reproducible.
7856 impl EntropySource for Keys {
7857 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7860 impl SignerProvider for Keys {
7861 type Signer = InMemorySigner;
7863 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7864 self.signer.channel_keys_id()
// Signer deserialization is intentionally unsupported in these tests.
7867 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
7871 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
// Destination is a P2WPKH script derived from a fixed, hard-coded claim key.
7873 fn get_destination_script(&self) -> Result<Script, ()> {
7874 let secp_ctx = Secp256k1::signing_only();
7875 let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7876 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7877 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
// Shutdown script is a P2WPKH built from the same fixed secret key.
7880 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7881 let secp_ctx = Secp256k1::signing_only();
7882 let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7883 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7887 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
// Test-vector helper: derive the public key for a hex-encoded secret key.
7888 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7889 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
// With `shutdown_anysegwit` cleared from our features, a shutdown script using
// a non-v0 segwit version (V16 here) must make channel creation fail with
// `APIError::IncompatibleShutdownScript` carrying that script.
7893 fn upfront_shutdown_script_incompatibility() {
7894 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7895 let non_v0_segwit_shutdown_script =
7896 ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7898 let seed = [42; 32];
7899 let network = Network::Testnet;
7900 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Arrange for the keys interface to hand back the incompatible script when
// asked for an upfront shutdown scriptpubkey.
7901 keys_provider.expect(OnGetShutdownScriptpubkey {
7902 returns: non_v0_segwit_shutdown_script.clone(),
7905 let secp_ctx = Secp256k1::new();
7906 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7907 let config = UserConfig::default();
7908 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
7909 Err(APIError::IncompatibleShutdownScript { script }) => {
7910 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7912 Err(e) => panic!("Unexpected error: {:?}", e),
7913 Ok(_) => panic!("Expected error"),
7917 // Check that, during channel creation, we use the same feerate in the open channel message
7918 // as we do in the Channel object creation itself.
7920 fn test_open_channel_msg_fee() {
7921 let original_fee = 253;
7922 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7923 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7924 let secp_ctx = Secp256k1::new();
7925 let seed = [42; 32];
7926 let network = Network::Testnet;
7927 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7929 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7930 let config = UserConfig::default();
7931 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
7933 // Now change the fee so we can check that the fee in the open_channel message is the
7934 // same as the old fee.
// The channel must have captured the feerate at construction time rather than
// re-querying the estimator when building the open_channel message.
7935 fee_est.fee_est = 500;
7936 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7937 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7941 fn test_holder_vs_counterparty_dust_limit() {
7942 // Test that when calculating the local and remote commitment transaction fees, the correct
7943 // dust limits are used.
7944 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7945 let secp_ctx = Secp256k1::new();
7946 let seed = [42; 32];
7947 let network = Network::Testnet;
7948 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7949 let logger = test_utils::TestLogger::new();
7950 let best_block = BestBlock::from_network(network);
7952 // Go through the flow of opening a channel between two nodes, making sure
7953 // they have different dust limits.
7955 // Create Node A's channel pointing to Node B's pubkey
7956 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7957 let config = UserConfig::default();
7958 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
7960 // Create Node B's channel by receiving Node A's open_channel message
7961 // Make sure A's dust limit is as we expect.
7962 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7963 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7964 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
7966 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
7967 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
7968 accept_channel_msg.dust_limit_satoshis = 546;
7969 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Force A's dust limit (1560 sat) well above B's (546 sat) so the same HTLC amount can be
// dust from A's perspective but non-dust from B's.
7970 node_a_chan.context.holder_dust_limit_satoshis = 1560;
7972 // Node A --> Node B: funding created
7973 let output_script = node_a_chan.context.get_funding_redeemscript();
7974 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
7975 value: 10000000, script_pubkey: output_script.clone(),
7977 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
7978 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
7979 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
7981 // Node B --> Node A: funding signed
7982 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
7984 // Put some inbound and outbound HTLCs in A's channel.
7985 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
7986 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
7988 amount_msat: htlc_amount_msat,
7989 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
7990 cltv_expiry: 300000000,
7991 state: InboundHTLCState::Committed,
7994 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
7996 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
7997 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
7998 cltv_expiry: 200000000,
7999 state: OutboundHTLCState::Committed,
8000 source: HTLCSource::OutboundRoute {
8001 path: Path { hops: Vec::new(), blinded_tail: None },
8002 session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8003 first_hop_htlc_msat: 548,
8004 payment_id: PaymentId([42; 32]),
8006 skimmed_fee_msat: None,
8009 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8010 // the dust limit check.
8011 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8012 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
// All HTLCs dust => the next local commitment fee equals the zero-HTLC commitment fee.
8013 let local_commit_fee_0_htlcs = commit_tx_fee_sat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type()) * 1000;
8014 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8016 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8017 // of the HTLCs are seen to be above the dust limit.
// Flipping is_outbound_from_holder makes the fee calculation use the counterparty's (B's)
// lower dust limit, so the 2 pending HTLCs plus the candidate (3 total) are all non-dust.
8018 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8019 let remote_commit_fee_3_htlcs = commit_tx_fee_sat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type()) * 1000;
8020 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8021 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8022 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8026 fn test_timeout_vs_success_htlc_dust_limit() {
8027 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8028 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8029 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8030 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8031 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8032 let secp_ctx = Secp256k1::new();
8033 let seed = [42; 32];
8034 let network = Network::Testnet;
8035 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8037 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8038 let config = UserConfig::default();
8039 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8041 let commitment_tx_fee_0_htlcs = commit_tx_fee_sat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type()) * 1000;
8042 let commitment_tx_fee_1_htlc = commit_tx_fee_sat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type()) * 1000;
8044 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8045 // counted as dust when it shouldn't be.
// The literal 253 below mirrors the TestFeeEstimator feerate set above; each amount is
// constructed exactly 1 sat above or below the effective dust threshold for that tx type.
8046 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8047 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8048 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8049 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8051 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8052 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8053 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8054 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8055 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Repeat the same two boundary checks from the remote's perspective, using the
// counterparty's dust limit instead of our own.
8057 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8059 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8060 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8061 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8062 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8063 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8065 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8066 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8067 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8068 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8069 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// Test that after a fresh funding flow with no commitment updates on either side, both
// nodes' channel_reestablish messages report the initial commitment numbers and an
// all-zero last per-commitment secret.
8073 fn channel_reestablish_no_updates() {
8074 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8075 let logger = test_utils::TestLogger::new();
8076 let secp_ctx = Secp256k1::new();
8077 let seed = [42; 32];
8078 let network = Network::Testnet;
8079 let best_block = BestBlock::from_network(network);
8080 let chain_hash = ChainHash::using_genesis_block(network);
8081 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8083 // Go through the flow of opening a channel between two nodes.
8085 // Create Node A's channel pointing to Node B's pubkey
8086 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8087 let config = UserConfig::default();
8088 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8090 // Create Node B's channel by receiving Node A's open_channel message
8091 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8092 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8093 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8095 // Node B --> Node A: accept channel
8096 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8097 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8099 // Node A --> Node B: funding created
8100 let output_script = node_a_chan.context.get_funding_redeemscript();
8101 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8102 value: 10000000, script_pubkey: output_script.clone(),
8104 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8105 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8106 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8108 // Node B --> Node A: funding signed
8109 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8111 // Now disconnect the two nodes and check that the commitment point in
8112 // Node B's channel_reestablish message is sane.
8113 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8114 let msg = node_b_chan.get_channel_reestablish(&&logger);
8115 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8116 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8117 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8119 // Check that the commitment point in Node A's channel_reestablish message
// is sane as well — A's reestablish fields mirror B's expectations exactly.
8121 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8122 let msg = node_a_chan.get_channel_reestablish(&&logger);
8123 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8124 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8125 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Test that max_inbound_htlc_value_in_flight_percent_of_channel is honored for both
// outbound and inbound channel construction, and clamped to the [1%, 100%] range.
8129 fn test_configured_holder_max_htlc_value_in_flight() {
8130 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8131 let logger = test_utils::TestLogger::new();
8132 let secp_ctx = Secp256k1::new();
8133 let seed = [42; 32];
8134 let network = Network::Testnet;
8135 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8136 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8137 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Four configs: two in-range values (2%, 99%) and two out-of-range values (0%, 101%)
// that should be clamped to the lower (1%) and upper (100%) bounds respectively.
8139 let mut config_2_percent = UserConfig::default();
8140 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8141 let mut config_99_percent = UserConfig::default();
8142 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8143 let mut config_0_percent = UserConfig::default();
8144 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8145 let mut config_101_percent = UserConfig::default();
8146 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8148 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8149 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8150 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8151 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8152 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8153 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8155 // Test with the upper bound - 1 of valid values (99%).
8156 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8157 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8158 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Reuse chan_1's open_channel message for all of the inbound-side checks below.
8160 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8162 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8163 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8164 // which is set to the lower bound - 1 (2%) of the `channel_value`.
8165 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8166 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8167 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8169 // Test with the upper bound - 1 of valid values (99%).
8170 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8171 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8172 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8174 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8175 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8176 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8177 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8178 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8180 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8181 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8183 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8184 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8185 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8187 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8188 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8189 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8190 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8191 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8193 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8194 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8196 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8197 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8198 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
// Driver test: exercises test_self_and_counterparty_channel_reserve (below) across valid,
// clamped, and invalid channel-reserve percentage combinations.
8202 fn test_configured_holder_selected_channel_reserve_satoshis() {
8204 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8205 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8206 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8208 // Test with valid but unreasonably high channel reserves
8209 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8210 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8211 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8213 // Test with calculated channel reserve less than lower bound
8214 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8215 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8217 // Test with invalid channel reserves since sum of both is greater than or equal
// to the channel value — the helper asserts that the inbound side rejects these.
8219 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8220 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Helper for the reserve tests above: opens an outbound channel with the given reserve
// percentage, checks the holder-selected reserve (floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS),
// then has an inbound node process the open_channel. If the combined reserve percentages
// reach 100% of the channel value, the inbound side must reject the channel instead.
8223 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8224 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8225 let logger = test_utils::TestLogger::new();
8226 let secp_ctx = Secp256k1::new();
8227 let seed = [42; 32];
8228 let network = Network::Testnet;
8229 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8230 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8231 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8234 let mut outbound_node_config = UserConfig::default();
// Config stores the reserve as parts-per-million, hence the * 1_000_000 conversion.
8235 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8236 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8238 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8239 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8241 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8242 let mut inbound_node_config = UserConfig::default();
8243 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8245 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8246 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8248 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8250 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8251 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8253 // Channel Negotiations failed
8254 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8255 assert!(result.is_err());
// Test that a received channel_update message updates the stored counterparty forwarding
// info without touching our own htlc_minimum_msat, and that re-applying the same update
// is reported as a no-op.
8260 fn channel_update() {
8261 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8262 let logger = test_utils::TestLogger::new();
8263 let secp_ctx = Secp256k1::new();
8264 let seed = [42; 32];
8265 let network = Network::Testnet;
8266 let best_block = BestBlock::from_network(network);
8267 let chain_hash = ChainHash::using_genesis_block(network);
8268 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8270 // Create Node A's channel pointing to Node B's pubkey
8271 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8272 let config = UserConfig::default();
8273 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8275 // Create Node B's channel by receiving Node A's open_channel message
8276 // Make sure A's dust limit is as we expect.
8277 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8278 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8279 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8281 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8282 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8283 accept_channel_msg.dust_limit_satoshis = 546;
8284 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8285 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8287 // Node A --> Node B: funding created
8288 let output_script = node_a_chan.context.get_funding_redeemscript();
8289 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8290 value: 10000000, script_pubkey: output_script.clone(),
8292 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8293 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8294 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8296 // Node B --> Node A: funding signed
8297 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8299 // Make sure that receiving a channel update will update the Channel as expected.
8300 let update = ChannelUpdate {
8301 contents: UnsignedChannelUpdate {
8303 short_channel_id: 0,
8306 cltv_expiry_delta: 100,
8307 htlc_minimum_msat: 5,
8308 htlc_maximum_msat: MAX_VALUE_MSAT,
8310 fee_proportional_millionths: 11,
8311 excess_data: Vec::new(),
// The signature is never validated by Channel::channel_update, so a dummy value suffices.
8313 signature: Signature::from(unsafe { FFISignature::new() })
// First application of the update returns true (the stored forwarding info changed).
8315 assert!(node_a_chan.channel_update(&update).unwrap());
8317 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8318 // change our official htlc_minimum_msat.
8319 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8320 match node_a_chan.context.counterparty_forwarding_info() {
8322 assert_eq!(info.cltv_expiry_delta, 100);
8323 assert_eq!(info.fee_base_msat, 110);
8324 assert_eq!(info.fee_proportional_millionths, 11);
8326 None => panic!("expected counterparty forwarding info to be Some")
// Re-applying the identical update must return false: nothing changed.
8329 assert!(!node_a_chan.channel_update(&update).unwrap());
8332 #[cfg(feature = "_test_vectors")]
8334 fn outbound_commitment_test() {
8335 use bitcoin::util::sighash;
8336 use bitcoin::consensus::encode::serialize;
8337 use bitcoin::blockdata::transaction::EcdsaSighashType;
8338 use bitcoin::hashes::hex::FromHex;
8339 use bitcoin::hash_types::Txid;
8340 use bitcoin::secp256k1::Message;
8341 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
8342 use crate::ln::PaymentPreimage;
8343 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8344 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8345 use crate::util::logger::Logger;
8346 use crate::sync::Arc;
8348 // Test vectors from BOLT 3 Appendices C and F (anchors):
8349 let feeest = TestFeeEstimator{fee_est: 15000};
8350 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8351 let secp_ctx = Secp256k1::new();
8353 let mut signer = InMemorySigner::new(
8355 SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8356 SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8357 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8358 SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8359 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8361 // These aren't set in the test vectors:
8362 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8368 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8369 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8370 let keys_provider = Keys { signer: signer.clone() };
8372 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8373 let mut config = UserConfig::default();
8374 config.channel_handshake_config.announced_channel = false;
8375 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8376 chan.context.holder_dust_limit_satoshis = 546;
8377 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8379 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8381 let counterparty_pubkeys = ChannelPublicKeys {
8382 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8383 revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8384 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8385 delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8386 htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8388 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8389 CounterpartyChannelTransactionParameters {
8390 pubkeys: counterparty_pubkeys.clone(),
8391 selected_contest_delay: 144
8393 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8394 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8396 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8397 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8399 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8400 hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8402 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8403 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8405 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8406 // derived from a commitment_seed, so instead we copy it here and call
8407 // build_commitment_transaction.
8408 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8409 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8410 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8411 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8412 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8414 macro_rules! test_commitment {
8415 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8416 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8417 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8421 macro_rules! test_commitment_with_anchors {
8422 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8423 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8424 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8428 macro_rules! test_commitment_common {
8429 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8430 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8432 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8433 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8435 let htlcs = commitment_stats.htlcs_included.drain(..)
8436 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8438 (commitment_stats.tx, htlcs)
8440 let trusted_tx = commitment_tx.trust();
8441 let unsigned_tx = trusted_tx.built_transaction();
8442 let redeemscript = chan.context.get_funding_redeemscript();
8443 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8444 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8445 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8446 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8448 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8449 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8450 let mut counterparty_htlc_sigs = Vec::new();
8451 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8453 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8454 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8455 counterparty_htlc_sigs.push(remote_signature);
8457 assert_eq!(htlcs.len(), per_htlc.len());
8459 let holder_commitment_tx = HolderCommitmentTransaction::new(
8460 commitment_tx.clone(),
8461 counterparty_signature,
8462 counterparty_htlc_sigs,
8463 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8464 chan.context.counterparty_funding_pubkey()
8466 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8467 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8469 let funding_redeemscript = chan.context.get_funding_redeemscript();
8470 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8471 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8473 // ((htlc, counterparty_sig), (index, holder_sig))
8474 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8477 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8478 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8480 let ref htlc = htlcs[$htlc_idx];
8481 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8482 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8483 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8484 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8485 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8486 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8487 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8489 let mut preimage: Option<PaymentPreimage> = None;
8492 let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8493 if out == htlc.payment_hash {
8494 preimage = Some(PaymentPreimage([i; 32]));
8498 assert!(preimage.is_some());
8501 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8502 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8503 channel_derivation_parameters: ChannelDerivationParameters {
8504 value_satoshis: chan.context.channel_value_satoshis,
8505 keys_id: chan.context.channel_keys_id,
8506 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8508 commitment_txid: trusted_tx.txid(),
8509 per_commitment_number: trusted_tx.commitment_number(),
8510 per_commitment_point: trusted_tx.per_commitment_point(),
8511 feerate_per_kw: trusted_tx.feerate_per_kw(),
8513 preimage: preimage.clone(),
8514 counterparty_sig: *htlc_counterparty_sig,
8515 }, &secp_ctx).unwrap();
8516 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8517 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8519 let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8520 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8521 let trusted_tx = holder_commitment_tx.trust();
8522 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8523 log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8524 assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8526 assert!(htlc_counterparty_sig_iter.next().is_none());
8530 // anchors: simple commitment tx with no HTLCs and single anchor
8531 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8532 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8533 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8535 // simple commitment tx with no HTLCs
8536 chan.context.value_to_self_msat = 7000000000;
8538 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8539 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8540 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8542 // anchors: simple commitment tx with no HTLCs
8543 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8544 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8545 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8547 chan.context.pending_inbound_htlcs.push({
8548 let mut out = InboundHTLCOutput{
8550 amount_msat: 1000000,
8552 payment_hash: PaymentHash([0; 32]),
8553 state: InboundHTLCState::Committed,
8555 out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8558 chan.context.pending_inbound_htlcs.push({
8559 let mut out = InboundHTLCOutput{
8561 amount_msat: 2000000,
8563 payment_hash: PaymentHash([0; 32]),
8564 state: InboundHTLCState::Committed,
8566 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8569 chan.context.pending_outbound_htlcs.push({
8570 let mut out = OutboundHTLCOutput{
8572 amount_msat: 2000000,
8574 payment_hash: PaymentHash([0; 32]),
8575 state: OutboundHTLCState::Committed,
8576 source: HTLCSource::dummy(),
8577 skimmed_fee_msat: None,
8579 out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8582 chan.context.pending_outbound_htlcs.push({
8583 let mut out = OutboundHTLCOutput{
8585 amount_msat: 3000000,
8587 payment_hash: PaymentHash([0; 32]),
8588 state: OutboundHTLCState::Committed,
8589 source: HTLCSource::dummy(),
8590 skimmed_fee_msat: None,
8592 out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8595 chan.context.pending_inbound_htlcs.push({
8596 let mut out = InboundHTLCOutput{
8598 amount_msat: 4000000,
8600 payment_hash: PaymentHash([0; 32]),
8601 state: InboundHTLCState::Committed,
8603 out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8607 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8608 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8609 chan.context.feerate_per_kw = 0;
8611 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8612 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8613 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8616 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8617 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8618 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8621 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8622 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8623 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8626 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8627 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8628 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8631 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8632 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8633 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8636 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8637 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8638 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8641 // commitment tx with seven outputs untrimmed (maximum feerate)
8642 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8643 chan.context.feerate_per_kw = 647;
8645 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8646 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8647 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8650 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8651 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8652 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8655 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8656 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8657 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8660 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8661 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8662 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8665 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8666 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8667 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8670 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8671 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8672 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8675 // commitment tx with six outputs untrimmed (minimum feerate)
8676 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8677 chan.context.feerate_per_kw = 648;
8679 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8680 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8681 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8684 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8685 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8686 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8689 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8690 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8691 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8694 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8695 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8696 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8699 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8700 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8701 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8704 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8705 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8706 chan.context.feerate_per_kw = 645;
8707 chan.context.holder_dust_limit_satoshis = 1001;
8709 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8710 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8711 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8714 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8715 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8716 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8719 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8720 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8721 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8724 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8725 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8726 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8729 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8730 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8731 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8734 // commitment tx with six outputs untrimmed (maximum feerate)
8735 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8736 chan.context.feerate_per_kw = 2069;
8737 chan.context.holder_dust_limit_satoshis = 546;
8739 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8740 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8741 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8744 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8745 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8746 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8749 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8750 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8751 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8754 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8755 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8756 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8759 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8760 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8761 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8764 // commitment tx with five outputs untrimmed (minimum feerate)
8765 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8766 chan.context.feerate_per_kw = 2070;
8768 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8769 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8770 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8773 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8774 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8775 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8778 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8779 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8780 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8783 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8784 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8785 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8788 // commitment tx with five outputs untrimmed (maximum feerate)
8789 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8790 chan.context.feerate_per_kw = 2194;
8792 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8793 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8794 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8797 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8798 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8799 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8802 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8803 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8804 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8807 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8808 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8809 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8812 // commitment tx with four outputs untrimmed (minimum feerate)
8813 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8814 chan.context.feerate_per_kw = 2195;
8816 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8817 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8818 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8821 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8822 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8823 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8826 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8827 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8828 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8831 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8832 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8833 chan.context.feerate_per_kw = 2185;
8834 chan.context.holder_dust_limit_satoshis = 2001;
8835 let cached_channel_type = chan.context.channel_type;
8836 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8838 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8839 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8840 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8843 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8844 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8845 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8848 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8849 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8850 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8853 // commitment tx with four outputs untrimmed (maximum feerate)
8854 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8855 chan.context.feerate_per_kw = 3702;
8856 chan.context.holder_dust_limit_satoshis = 546;
8857 chan.context.channel_type = cached_channel_type.clone();
8859 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8860 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8861 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8864 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8865 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8866 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8869 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8870 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8871 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8874 // commitment tx with three outputs untrimmed (minimum feerate)
8875 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8876 chan.context.feerate_per_kw = 3703;
8878 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8879 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8880 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8883 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8884 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8885 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8888 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8889 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8890 chan.context.feerate_per_kw = 3687;
8891 chan.context.holder_dust_limit_satoshis = 3001;
8892 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8894 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8895 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8896 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8899 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8900 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8901 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8904 // commitment tx with three outputs untrimmed (maximum feerate)
8905 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8906 chan.context.feerate_per_kw = 4914;
8907 chan.context.holder_dust_limit_satoshis = 546;
8908 chan.context.channel_type = cached_channel_type.clone();
8910 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8911 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8912 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8915 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8916 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8917 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8920 // commitment tx with two outputs untrimmed (minimum feerate)
8921 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8922 chan.context.feerate_per_kw = 4915;
8923 chan.context.holder_dust_limit_satoshis = 546;
8925 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8926 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8927 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8929 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8930 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8931 chan.context.feerate_per_kw = 4894;
8932 chan.context.holder_dust_limit_satoshis = 4001;
8933 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8935 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8936 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8937 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8939 // commitment tx with two outputs untrimmed (maximum feerate)
8940 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8941 chan.context.feerate_per_kw = 9651180;
8942 chan.context.holder_dust_limit_satoshis = 546;
8943 chan.context.channel_type = cached_channel_type.clone();
8945 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8946 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8947 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8949 // commitment tx with one output untrimmed (minimum feerate)
8950 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8951 chan.context.feerate_per_kw = 9651181;
8953 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8954 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8955 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8957 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8958 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8959 chan.context.feerate_per_kw = 6216010;
8960 chan.context.holder_dust_limit_satoshis = 4001;
8961 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8963 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8964 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8965 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8967 // commitment tx with fee greater than funder amount
8968 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8969 chan.context.feerate_per_kw = 9651936;
8970 chan.context.holder_dust_limit_satoshis = 546;
8971 chan.context.channel_type = cached_channel_type;
8973 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8974 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8975 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8977 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8978 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8979 chan.context.feerate_per_kw = 253;
8980 chan.context.pending_inbound_htlcs.clear();
8981 chan.context.pending_inbound_htlcs.push({
8982 let mut out = InboundHTLCOutput{
8984 amount_msat: 2000000,
8986 payment_hash: PaymentHash([0; 32]),
8987 state: InboundHTLCState::Committed,
8989 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8992 chan.context.pending_outbound_htlcs.clear();
8993 chan.context.pending_outbound_htlcs.push({
8994 let mut out = OutboundHTLCOutput{
8996 amount_msat: 5000001,
8998 payment_hash: PaymentHash([0; 32]),
8999 state: OutboundHTLCState::Committed,
9000 source: HTLCSource::dummy(),
9001 skimmed_fee_msat: None,
9003 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
9006 chan.context.pending_outbound_htlcs.push({
9007 let mut out = OutboundHTLCOutput{
9009 amount_msat: 5000000,
9011 payment_hash: PaymentHash([0; 32]),
9012 state: OutboundHTLCState::Committed,
9013 source: HTLCSource::dummy(),
9014 skimmed_fee_msat: None,
9016 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
9020 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9021 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9022 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9025 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9026 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9027 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9029 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9030 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9031 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9033 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9034 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9035 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9038 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9039 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9040 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9041 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9044 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9045 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9046 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9048 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9049 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9050 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9052 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9053 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9054 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:
	// Checks that chan_utils::build_commitment_secret derives the exact
	// per-commitment secret mandated by the spec for a given (seed, index) pair.
	let mut seed = [0; 32];
	// All-zero seed with the maximum 48-bit index (2^48 - 1).
	seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
	// All-ones seed with the same maximum index.
	seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
	// Alternating-bit index 0b101010... exercises the bit-flipping derivation path.
	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
	           hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
	// Alternating-bit index 0b010101... (the complementary pattern).
	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
	           hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
	// Non-trivial seed (0x0101...01) at index 1.
	seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
	           hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
	// Checks the chan_utils key-derivation helpers (local keys and revocation
	// keys) against the spec's expected outputs for a fixed base secret and a
	// fixed per-commitment secret.
	let secp_ctx = Secp256k1::new();
	// Fixed inputs from the spec: the funding/base secret and the current
	// per-commitment secret.
	let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
	// Sanity-check the public points corresponding to the two secrets above.
	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
	// Tweaked localpubkey derived from (per_commitment_point, base_point).
	assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
	           hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
	// Matching tweaked private key derived from (per_commitment_point, base_secret).
	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
	           SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
	// Revocation pubkey derived from (per_commitment_point, base_point).
	assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
	           hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
	// Revocation private key — note this one takes both SECRETS (the holder of
	// both can spend a revoked commitment).
	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
	           SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
fn test_zero_conf_channel_type_support() {
	// Checks that an inbound open_channel whose explicit channel_type sets the
	// zero-conf feature bit is accepted by InboundV1Channel::new (even though
	// the `is_0conf` argument below is false — the acceptance decision here is
	// about the channel *type*, not about actually funding at zero conf).
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();
	// Build an outbound channel (node A) so we can generate a real
	// open_channel message to feed to the inbound side.
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
	// Request static_remote_key + zero-conf explicitly in the channel_type.
	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();
	let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(channel_type_features);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
	// The inbound side must not reject the zero-conf channel type outright.
	assert!(res.is_ok());
fn test_supports_anchors_zero_htlc_tx_fee() {
	// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
	// resulting `channel_type`.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();
	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
	// Opt into anchors negotiation in the handshake config used below.
	let mut config = UserConfig::default();
	config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
	// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
	// need to signal it. Here the counterparty features come from a default
	// config (no anchors), so the negotiated type must not include anchors.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
		&config, 0, 42, None
	assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
	// When both sides signal anchors support, the negotiated channel_type
	// should be exactly static_remote_key + anchors_zero_fee_htlc_tx.
	let mut expected_channel_type = ChannelTypeFeatures::empty();
	expected_channel_type.set_static_remote_key_required();
	expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
	// Feed A's open_channel to an inbound channel on the B side, with B also
	// advertising anchors support via the same config.
	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	// Both ends must have settled on the anchors channel type.
	assert_eq!(channel_a.context.channel_type, expected_channel_type);
	assert_eq!(channel_b.context.channel_type, expected_channel_type);
9182 fn test_rejects_implicit_simple_anchors() {
9183 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9184 // each side's `InitFeatures`, it is rejected.
9185 let secp_ctx = Secp256k1::new();
9186 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9187 let network = Network::Testnet;
9188 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9189 let logger = test_utils::TestLogger::new();
9191 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9192 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9194 let config = UserConfig::default();
9196 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9197 let static_remote_key_required: u64 = 1 << 12;
9198 let simple_anchors_required: u64 = 1 << 20;
// Raw feature vector advertising `static_remote_key` plus the legacy `option_anchors`
// (bit 20) — NOT `anchors_zero_fee_htlc_tx` — which the assertion below expects to be
// rejected.
9199 let raw_init_features = static_remote_key_required | simple_anchors_required;
9200 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
// A opens the channel as a plain outbound channel with default-config features.
9202 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9203 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9204 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9208 // Set `channel_type` to `None` to force the implicit feature negotiation.
9209 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9210 open_channel_msg.channel_type = None;
9212 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9213 // `static_remote_key`, it will fail the channel.
9214 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9215 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9216 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9217 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Implicitly-negotiated `option_anchors` must be refused by the inbound side.
9219 assert!(channel_b.is_err());
9223 fn test_rejects_simple_anchors_channel_type() {
9224 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
// it is rejected.
9226 let secp_ctx = Secp256k1::new();
9227 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9228 let network = Network::Testnet;
9229 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9230 let logger = test_utils::TestLogger::new();
9232 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9233 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9235 let config = UserConfig::default();
9237 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9238 let static_remote_key_required: u64 = 1 << 12;
9239 let simple_anchors_required: u64 = 1 << 20;
9240 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
// Build both an `InitFeatures` and a `ChannelTypeFeatures` carrying the legacy
// `option_anchors` bit, and sanity-check that neither is treated as an unknown bit —
// the rejections below must come from the anchors check, not unknown-feature handling.
9241 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9242 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9243 assert!(!simple_anchors_init.requires_unknown_bits());
9244 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9246 // First, we'll try to open a channel between A and B where A requests a channel type for
9247 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9248 // B as it's not supported by LDK.
9249 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9250 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9251 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
// Overwrite the `channel_type` in the open_channel message with the legacy anchors type.
9255 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9256 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9258 let res = InboundV1Channel::<&TestKeysInterface>::new(
9259 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9260 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9261 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9263 assert!(res.is_err());
9265 // Then, we'll try to open another channel where A requests a channel type for
9266 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9267 // original `option_anchors` feature, which should be rejected by A as it's not supported by
// LDK.
9269 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9270 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9271 10000000, 100000, 42, &config, 0, 42, None
9274 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9276 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9277 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9278 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9279 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Simulate the malicious downgrade: B's accept_channel echoes back the legacy anchors type.
9282 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9283 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
// A must reject the accept_channel carrying the downgraded `channel_type`.
9285 let res = channel_a.accept_channel(
9286 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9288 assert!(res.is_err());
9292 fn test_waiting_for_batch() {
9293 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9294 let logger = test_utils::TestLogger::new();
9295 let secp_ctx = Secp256k1::new();
9296 let seed = [42; 32];
9297 let network = Network::Testnet;
9298 let best_block = BestBlock::from_network(network);
9299 let chain_hash = ChainHash::using_genesis_block(network);
9300 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9302 let mut config = UserConfig::default();
9303 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9304 // channel in a batch before all channels are ready.
9305 config.channel_handshake_limits.trust_own_funding_0conf = true;
9307 // Create a channel from node a to node b that will be part of batch funding.
9308 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9309 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9314 &channelmanager::provided_init_features(&config),
9324 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9325 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9326 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9331 &channelmanager::provided_channel_type_features(&config),
9332 &channelmanager::provided_init_features(&config),
9338 true, // Allow node b to send a 0conf channel_ready.
9341 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9342 node_a_chan.accept_channel(
9343 &accept_channel_msg,
9344 &config.channel_handshake_limits,
9345 &channelmanager::provided_init_features(&config),
9348 // Fund the channel with a batch funding transaction.
9349 let output_script = node_a_chan.context.get_funding_redeemscript();
9350 let tx = Transaction {
9352 lock_time: PackedLockTime::ZERO,
9356 value: 10000000, script_pubkey: output_script.clone(),
9359 value: 10000000, script_pubkey: Builder::new().into_script(),
9362 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9363 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9368 ).map_err(|_| ()).unwrap();
9369 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9370 &funding_created_msg.unwrap(),
9374 ).map_err(|_| ()).unwrap();
9375 let node_b_updates = node_b_chan.monitor_updating_restored(
9383 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9384 // broadcasting the funding transaction until the batch is ready.
9385 let _ = node_a_chan.funding_signed(
9386 &funding_signed_msg.unwrap(),
9391 let node_a_updates = node_a_chan.monitor_updating_restored(
9398 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9399 // as the funding transaction depends on all channels in the batch becoming ready.
9400 assert!(node_a_updates.channel_ready.is_none());
9401 assert!(node_a_updates.funding_broadcastable.is_none());
9403 node_a_chan.context.channel_state,
9404 ChannelState::FundingSent as u32 |
9405 ChannelState::WaitingForBatch as u32,
9408 // It is possible to receive a 0conf channel_ready from the remote node.
9409 node_a_chan.channel_ready(
9410 &node_b_updates.channel_ready.unwrap(),
9418 node_a_chan.context.channel_state,
9419 ChannelState::FundingSent as u32 |
9420 ChannelState::WaitingForBatch as u32 |
9421 ChannelState::TheirChannelReady as u32,
9424 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9425 node_a_chan.set_batch_ready();
9427 node_a_chan.context.channel_state,
9428 ChannelState::FundingSent as u32 |
9429 ChannelState::TheirChannelReady as u32,
9431 assert!(node_a_chan.check_get_channel_ready(0).is_some());