// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;

use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
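
// Illustrative sketch (hypothetical test helper, not part of LDK) of how the fields above
// typically relate: the cap on the next HTLC can never exceed the total outbound capacity,
// which in turn cannot exceed our raw balance.
#[cfg(test)]
fn available_balances_relationships_example(b: &AvailableBalances) {
	assert!(b.next_outbound_htlc_limit_msat <= b.outbound_capacity_msat);
	assert!(b.outbound_capacity_msat <= b.balance_msat);
}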

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`:
	Outbound,
	Committed,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit.
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	skimmed_fee_msat: Option<u64>,
}

/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}

/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have sent (or are prepared to send) our `open_channel`/`accept_channel` message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 1 << 2,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 1 << 3,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	ChannelReady = 1 << 6,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 1 << 12,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}

const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
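
// Illustrative sketch (hypothetical test helper, not part of LDK): `channel_state` packs one
// base state plus the flag bits above into a single u32, so masking with these constants
// recovers each piece. A `ChannelReady` channel with both shutdown flags set still reports
// its base state once the flag bits are masked off.
#[cfg(test)]
fn channel_state_mask_example() {
	let channel_state = ChannelState::ChannelReady as u32
		| ChannelState::LocalShutdownSent as u32
		| ChannelState::RemoteShutdownSent as u32;
	assert_eq!(channel_state & BOTH_SIDES_SHUTDOWN_MASK, BOTH_SIDES_SHUTDOWN_MASK);
	assert_eq!(channel_state & !STATE_FLAGS, ChannelState::ChannelReady as u32);
}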

pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
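
// Worked example (a sketch, not part of LDK): the expected weight of a non-anchors
// commitment transaction with two non-dust HTLC outputs, using the constants above.
#[cfg(test)]
fn commitment_weight_example() {
	let features = ChannelTypeFeatures::only_static_remote_key();
	let weight = commitment_tx_base_weight(&features) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	assert_eq!(weight, 724 + 344);
}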

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
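
// Usage sketch (hypothetical helper, not a call site from this file): `secp_check!` unwraps a
// secp256k1 `Result`, converting any error into an early `Err(ChannelError::Close(..))` return
// from the enclosing function.
#[cfg(test)]
fn secp_check_usage_example(
	ctx: &Secp256k1<secp256k1::All>, msg: &secp256k1::Message, sig: &Signature, pk: &PublicKey,
) -> Result<(), ChannelError> {
	// On an invalid signature this early-returns Err(ChannelError::Close("Invalid signature")).
	secp_check!(ctx.verify_ecdsa(msg, sig, pk), "Invalid signature".to_owned());
	Ok(())
}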
420 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
421 /// our counterparty or not. However, we don't want to announce updates right away to avoid
422 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
423 /// our channel_update message and track the current state here.
424 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
425 #[derive(Clone, Copy, PartialEq)]
426 pub(super) enum ChannelUpdateStatus {
427 /// We've announced the channel as enabled and are connected to our peer.
429 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
431 /// Our channel is live again, but we haven't announced the channel as enabled yet.
433 /// We've announced the channel as disabled.

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self { amount_msat, origin }
	}
}

/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}

/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub funding_created: Option<msgs::FundingCreated>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}

/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
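
// Worked example (a sketch, not LDK code): when we are the channel initiator and currently pay
// `feerate_per_kw`, we budget the next commitment fee as though the feerate had already jumped
// by FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE before treating any remaining balance as spendable.
#[cfg(test)]
fn fee_spike_buffer_example() {
	let feerate_per_kw: u64 = 500;
	let features = ChannelTypeFeatures::only_static_remote_key();
	// Commitment weight with one non-dust HTLC output.
	let weight = commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC;
	// Fee (in sats) at the buffered feerate, i.e. double the current rate.
	let buffered_fee_sat = feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000;
	assert_eq!(buffered_fee_sat, 500 * 2 * (724 + 172) / 1000);
}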

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT #2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
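
// Sanity-check of the derivation above (a sketch, not LDK code): ~300 seconds of average
// network-wide convergence divided by the 60-second tick interval yields 5 ticks.
#[cfg(test)]
const _: () = assert!(EXPIRE_PREV_CONFIG_TICKS == 300 / 60);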

/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});

/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}

/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel still exists after reaching
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
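
// Usage sketch (hypothetical test helper, not part of LDK): a once-per-minute timer drives the
// age counter, and the channel is reaped on the tick that reaches the limit.
#[cfg(test)]
fn unfunded_channel_expiry_example() {
	let mut unfunded_context = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	// Each call both advances the counter and reports whether the age limit was reached.
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!unfunded_context.should_expire_unfunded_channel());
	}
	assert!(unfunded_context.should_expire_unfunded_channel());
}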

/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next receive a channel_reestablish.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// Minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay the closing negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}

impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}

	/// Returns the state of the channel in its various stages of shutdown.
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
			return ChannelShutdownState::ShutdownComplete;
		}
		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
			return ChannelShutdownState::ShutdownInitiated;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
			return ChannelShutdownState::ResolvingHTLCs;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
			return ChannelShutdownState::NegotiatingClosingFee;
		}
		return ChannelShutdownState::NotShuttingDown;
	}

	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() &&
		self.pending_outbound_htlcs.is_empty() &&
		self.pending_update_fee.is_none() &&
		self.channel_state &
			(BOTH_SIDES_SHUTDOWN_MASK |
			ChannelState::AwaitingRemoteRevoke as u32 |
			ChannelState::PeerDisconnected as u32 |
			ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
	}

	/// Returns true if this channel is currently available for use. This checks everything
	/// is_usable() does and additionally requires the peer to be connected.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}

	// Public utilities:

	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}

	// Return the `temporary_channel_id` used during channel establishment.
	//
	// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}

	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}

	/// Gets the "user_id" value passed into the construction of this channel. It has no special
	/// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}

	/// Gets the channel's type
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}

	/// Gets the channel's `short_channel_id`.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}

	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}

	/// Returns the holder signer for this channel.
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.holder_signer
	}

	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}

	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}

	/// Returns the height in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
		let conf_height = self.funding_tx_confirmation_height;
		if conf_height > 0 {
			Some(conf_height)
		} else {
			None
		}
	}

	/// Returns the block hash in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}

	/// Returns the current number of confirmations on the funding transaction.
	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
		if self.funding_tx_confirmation_height == 0 {
			// We either haven't seen any confirmation yet, or observed a reorg.
			return 0;
		}

		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	}
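
	// Worked example (hypothetical test helper, not part of LDK): a transaction confirmed at
	// height 100 has, at chain tip 105, confirmations in each of blocks 100..=105, i.e.
	// 105 - 100 + 1 = 6, while a tip below the confirmation height (mid-reorg) yields 0.
	#[cfg(test)]
	fn funding_confirmations_example() {
		assert_eq!(105u32.checked_sub(100).map_or(0, |c| c + 1), 6);
		assert_eq!(99u32.checked_sub(100).map_or(0, |c| c + 1), 0);
	}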

	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_announced_htlc_max_msat(&self) -> u64 {
		return cmp::min(
			// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
			// to use full capacity. This is an effort to reduce routing failures, because in many cases
			// channel might have been used to route very small values (either by honest users or as DoS).
			self.channel_value_satoshis * 1000 * 9 / 10,
			self.counterparty_max_htlc_value_in_flight_msat
		);
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}

	fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
		self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
			let holder_reserve = self.holder_selected_channel_reserve_satoshis;
			cmp::min(
				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
				party_max_htlc_value_in_flight_msat
			)
		})
	}
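
	// Worked example (a sketch, not LDK code): on a 1_000_000 sat channel with 10_000 sat
	// reserves on each side and a 500_000_000 msat in-flight cap, the advertised HTLC maximum
	// is min((1_000_000 - 10_000 - 10_000) * 1000, 500_000_000) = 500_000_000 msat.
	#[cfg(test)]
	fn htlc_maximum_example() {
		let max_msat = cmp::min((1_000_000u64 - 10_000 - 10_000) * 1000, 500_000_000);
		assert_eq!(max_msat, 500_000_000);
	}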

	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}

	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}

	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}

	pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
		fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
	where F::Target: FeeEstimator
	{
		match self.config.options.max_dust_htlc_exposure {
			MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
				let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
					ConfirmationTarget::OnChainSweep) as u64;
				feerate_per_kw.saturating_mul(multiplier)
			},
			MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
		}
	}
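
	// Worked example (a sketch, not LDK code): with `MaxDustHTLCExposure::FeeRateMultiplier(10_000)`
	// and an estimated 253 sat/kW feerate, the dust exposure cap is 253 * 10_000 = 2_530_000 msat.
	#[cfg(test)]
	fn max_dust_exposure_example() {
		let feerate_per_kw: u64 = 253;
		assert_eq!(feerate_per_kw.saturating_mul(10_000), 2_530_000);
	}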

	/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
	pub fn prev_config(&self) -> Option<ChannelConfig> {
		self.prev_config.map(|prev_config| prev_config.0)
	}

	// Checks whether we should emit a `ChannelPending` event.
	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
		self.is_funding_broadcast() && !self.channel_pending_event_emitted
	}

	// Returns whether we already emitted a `ChannelPending` event.
	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
		self.channel_pending_event_emitted
	}

	// Remembers that we already emitted a `ChannelPending` event.
	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
		self.channel_pending_event_emitted = true;
	}

	// Checks whether we should emit a `ChannelReady` event.
	pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
		self.is_usable() && !self.channel_ready_event_emitted
	}

	// Remembers that we already emitted a `ChannelReady` event.
	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
		self.channel_ready_event_emitted = true;
	}
1262 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1263 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1264 /// no longer be considered when forwarding HTLCs.
1265 pub fn maybe_expire_prev_config(&mut self) {
1266 if self.prev_config.is_none() {
1269 let prev_config = self.prev_config.as_mut().unwrap();
1271 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1272 self.prev_config = None;
1276 /// Returns the current [`ChannelConfig`] applied to the channel.
1277 pub fn config(&self) -> ChannelConfig {
1281 /// Updates the channel's config. Returns a bool indicating whether the applied config
1282 /// update requires a new `ChannelUpdate` message to be broadcast.
1283 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1284 let did_channel_update =
1285 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1286 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1287 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1288 if did_channel_update {
1289 self.prev_config = Some((self.config.options, 0));
1290 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1291 // policy change to propagate throughout the network.
1292 self.update_time_counter += 1;
1294 self.config.options = *config;
1298 /// Returns true if funding_signed was sent/received and the
1299 /// funding transaction has been broadcast if necessary.
1300 pub fn is_funding_broadcast(&self) -> bool {
1301 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1302 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
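// Illustrative note, not upstream text: masking with `!STATE_FLAGS` strips the flag
// bits so the remaining base state can be ordered against the `ChannelState`
// progression, while the explicit `WaitingForBatch` check keeps a batch-funded
// channel from being considered broadcast until every channel in the batch is ready.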
1305 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1306 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1307 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1308 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1309 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding an HTLC to a).
1311 /// @local is used only to convert relevant internal structures which refer to remote vs local
1312 /// to decide value of outputs and direction of HTLCs.
1313 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1314 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1315 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1316 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1317 /// which peer generated this transaction and "to whom" this transaction flows.
1319 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1320 where L::Target: Logger
1322 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1323 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1324 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1326 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1327 let mut remote_htlc_total_msat = 0;
1328 let mut local_htlc_total_msat = 0;
1329 let mut value_to_self_msat_offset = 0;
1331 let mut feerate_per_kw = self.feerate_per_kw;
1332 if let Some((feerate, update_state)) = self.pending_update_fee {
1333 if match update_state {
1334 // Note that these match the inclusion criteria when scanning
1335 // pending_inbound_htlcs below.
1336 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1337 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1338 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1340 feerate_per_kw = feerate;
1344 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1345 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1346 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1348 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1350 macro_rules! get_htlc_in_commitment {
1351 ($htlc: expr, $offered: expr) => {
1352 HTLCOutputInCommitment {
1354 amount_msat: $htlc.amount_msat,
1355 cltv_expiry: $htlc.cltv_expiry,
1356 payment_hash: $htlc.payment_hash,
1357 transaction_output_index: None
1362 macro_rules! add_htlc_output {
1363 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1364 if $outbound == local { // "offered HTLC output"
1365 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1366 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1369 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1371 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1372 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1373 included_non_dust_htlcs.push((htlc_in_tx, $source));
1375 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1376 included_dust_htlcs.push((htlc_in_tx, $source));
1379 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1380 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1383 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1385 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1386 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1387 included_non_dust_htlcs.push((htlc_in_tx, $source));
1389 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1390 included_dust_htlcs.push((htlc_in_tx, $source));
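// Worked example, illustrative only: on a non-anchor channel with feerate_per_kw =
// 5000 and a broadcaster dust limit of 546 sats, and assuming the usual ~663 WU
// HTLC-timeout transaction weight, an offered HTLC lands on the dust list whenever
// its value is below 546 + 5000 * 663 / 1000 = 3861 sats (3,861,000 msat).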
1396 for ref htlc in self.pending_inbound_htlcs.iter() {
1397 let (include, state_name) = match htlc.state {
1398 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1399 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1400 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1401 InboundHTLCState::Committed => (true, "Committed"),
1402 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1406 add_htlc_output!(htlc, false, None, state_name);
1407 remote_htlc_total_msat += htlc.amount_msat;
1409 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1411 &InboundHTLCState::LocalRemoved(ref reason) => {
1412 if generated_by_local {
1413 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1414 value_to_self_msat_offset += htlc.amount_msat as i64;
1423 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1425 for ref htlc in self.pending_outbound_htlcs.iter() {
1426 let (include, state_name) = match htlc.state {
1427 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1428 OutboundHTLCState::Committed => (true, "Committed"),
1429 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1430 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1431 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1434 let preimage_opt = match htlc.state {
1435 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1436 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1437 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1441 if let Some(preimage) = preimage_opt {
1442 preimages.push(preimage);
1446 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1447 local_htlc_total_msat += htlc.amount_msat;
1449 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1451 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1452 value_to_self_msat_offset -= htlc.amount_msat as i64;
1454 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1455 if !generated_by_local {
1456 value_to_self_msat_offset -= htlc.amount_msat as i64;
1464 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1465 assert!(value_to_self_msat >= 0);
1466 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (i.e.
1467 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1468 // "violate" their reserve value by counting those against it. Thus, we have to convert
1469 // everything to i64 before subtracting as otherwise we can overflow.
1470 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1471 assert!(value_to_remote_msat >= 0);
1473 #[cfg(debug_assertions)]
1475 // Make sure that the to_self/to_remote is always either past the appropriate
1476 // channel_reserve *or* it is making progress towards it.
1477 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1478 self.holder_max_commitment_tx_output.lock().unwrap()
1480 self.counterparty_max_commitment_tx_output.lock().unwrap()
1482 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1483 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1484 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1485 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1488 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1489 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1490 let (value_to_self, value_to_remote) = if self.is_outbound() {
1491 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1493 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1496 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1497 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1498 let (funding_pubkey_a, funding_pubkey_b) = if local {
1499 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1501 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1504 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1505 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1510 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1511 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1516 let num_nondust_htlcs = included_non_dust_htlcs.len();
1518 let channel_parameters =
1519 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1520 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1521 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1528 &mut included_non_dust_htlcs,
1531 let mut htlcs_included = included_non_dust_htlcs;
1532 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1533 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1534 htlcs_included.append(&mut included_dust_htlcs);
1536 // For the stats, trim the balances (in msat) to 0 when they're below the broadcaster's dust limit
1537 value_to_self_msat = if value_to_self_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_self_msat };
1538 value_to_remote_msat = if value_to_remote_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_remote_msat };
1546 local_balance_msat: value_to_self_msat as u64,
1547 remote_balance_msat: value_to_remote_msat as u64,
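// Illustrative example, not upstream LDK code: as the outbound (funding) party on an
// anchors channel, our to_local output is our msat balance floored to sats, minus both
// 330-sat anchors and the commitment fee. All values below are assumed.
#[cfg(test)]
fn example_funder_output_deduction() {
	let value_to_self_msat: i64 = 1_000_000_000; // 1,000,000 sats owed to us
	let total_fee_sat: i64 = 1_520; // hypothetical commit_tx_fee_sat result
	let anchors_val: i64 = 2 * 330; // two anchors at ANCHOR_OUTPUT_VALUE_SATOSHI each
	assert_eq!(value_to_self_msat / 1000 - anchors_val - total_fee_sat, 997_820);
}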
1553 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1554 /// counterparty will sign (i.e. DO NOT send signatures over a transaction created by this to
1555 /// our counterparty!)
1556 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1557 /// TODO Some magic rust shit to compile-time check this?
1558 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1559 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1560 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1561 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1562 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1564 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1568 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1569 /// will sign and send to our counterparty.
1570 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1571 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1572 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1573 //may see payments to it!
1574 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1575 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1576 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1578 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1581 /// Gets the redeemscript for the funding transaction output (i.e. the funding transaction output
1582 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1583 /// Panics if called before accept_channel/InboundV1Channel::new
1584 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1585 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1588 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1589 &self.get_counterparty_pubkeys().funding_pubkey
1592 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1596 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1597 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1598 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1599 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1600 // more dust balance if the feerate increases when we have several HTLCs pending
1601 // which are near the dust limit.
1602 let mut feerate_per_kw = self.feerate_per_kw;
1603 // If there's a pending update fee, use it to ensure we aren't under-estimating
1604 // potential feerate updates coming soon.
1605 if let Some((feerate, _)) = self.pending_update_fee {
1606 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1608 if let Some(feerate) = outbound_feerate_update {
1609 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1611 cmp::max(2530, feerate_per_kw * 1250 / 1000)
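// Illustrative example, not upstream LDK code: the buffered feerate is the greater of
// the 2530 sat/KW floor and 125% of the current feerate, so low feerates are dominated
// by the absolute floor and high feerates by the 25% bump.
#[cfg(test)]
fn example_dust_buffer_feerate() {
	let buffered = |feerate_per_kw: u32| cmp::max(2530, feerate_per_kw * 1250 / 1000);
	assert_eq!(buffered(1_000), 2_530); // floor dominates at low feerates
	assert_eq!(buffered(10_000), 12_500); // the 25% bump dominates at high feerates
}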
1614 /// Get forwarding information for the counterparty.
1615 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1616 self.counterparty_forwarding_info.clone()
1619 /// Returns an HTLCStats summarizing pending inbound HTLCs.
1620 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1622 let mut stats = HTLCStats {
1623 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1624 pending_htlcs_value_msat: 0,
1625 on_counterparty_tx_dust_exposure_msat: 0,
1626 on_holder_tx_dust_exposure_msat: 0,
1627 holding_cell_msat: 0,
1628 on_holder_tx_holding_cell_htlcs_count: 0,
1631 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1634 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1635 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1636 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1638 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1639 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
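// Illustrative note, not upstream text: an inbound HTLC is claimed via an HTLC-success
// transaction on *our* commitment but times out via an HTLC-timeout transaction on
// *theirs*, which is why the success weight pads our dust limit and the timeout weight
// pads the counterparty's.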
1640 for ref htlc in context.pending_inbound_htlcs.iter() {
1641 stats.pending_htlcs_value_msat += htlc.amount_msat;
1642 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1643 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1645 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1646 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1652 /// Returns an HTLCStats summarizing pending outbound HTLCs, *including* pending adds in our holding cell.
1653 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1655 let mut stats = HTLCStats {
1656 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1657 pending_htlcs_value_msat: 0,
1658 on_counterparty_tx_dust_exposure_msat: 0,
1659 on_holder_tx_dust_exposure_msat: 0,
1660 holding_cell_msat: 0,
1661 on_holder_tx_holding_cell_htlcs_count: 0,
1664 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1667 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1668 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1669 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1671 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1672 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1673 for ref htlc in context.pending_outbound_htlcs.iter() {
1674 stats.pending_htlcs_value_msat += htlc.amount_msat;
1675 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1676 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1678 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1679 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1683 for update in context.holding_cell_htlc_updates.iter() {
1684 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1685 stats.pending_htlcs += 1;
1686 stats.pending_htlcs_value_msat += amount_msat;
1687 stats.holding_cell_msat += amount_msat;
1688 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1689 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1691 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1692 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1694 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1701 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1702 /// Doesn't bother handling the
1703 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1704 /// corner case properly.
1705 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1706 -> AvailableBalances
1707 where F::Target: FeeEstimator
1709 let context = &self;
1710 // Note that we have to handle overflow due to the above case.
1711 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1712 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1714 let mut balance_msat = context.value_to_self_msat;
1715 for ref htlc in context.pending_inbound_htlcs.iter() {
1716 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1717 balance_msat += htlc.amount_msat;
1720 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1722 let outbound_capacity_msat = context.value_to_self_msat
1723 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1725 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1727 let mut available_capacity_msat = outbound_capacity_msat;
1729 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1730 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1734 if context.is_outbound() {
1735 // We should mind channel commit tx fee when computing how much of the available capacity
1736 // can be used in the next htlc. Mirrors the logic in send_htlc.
1738 // The fee depends on whether the amount we will be sending is above dust or not,
1739 // and the answer will in turn change the amount itself, making it a circular dependency.
1741 // This complicates the computation around dust-values, up to the one-htlc-value.
1742 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1743 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1744 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1747 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1748 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1749 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1750 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1751 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1752 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1753 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1756 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1757 // value ends up being below dust, we have this fee available again. In that case,
1758 // match the value to right-below-dust.
1759 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1760 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1761 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1762 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1763 debug_assert!(one_htlc_difference_msat != 0);
1764 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1765 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1766 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1768 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
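// Worked example, illustrative only (all values assumed): with 50,000,000 msat
// available, a real above-dust threshold of 4,000 sats, max_reserved of 48,000,000
// msat and min_reserved of 43,000,000 msat, the above-dust budget is 2,000,000 msat,
// below the 4,000,000-msat threshold. Adding back the 5,000,000-msat one-HTLC fee
// difference gives 7,000,000, which is then clamped to right-below-dust: 3,999,999 msat.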
1771 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1772 // sending a new HTLC won't reduce their balance below our reserve threshold.
1773 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1774 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1775 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1778 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1779 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1781 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1782 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1783 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1785 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1786 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1787 // we've selected for them, we can only send dust HTLCs.
1788 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1792 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1794 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1795 // between zero and the remaining dust exposure limit, OR above the dust limit.
1796 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1797 // send above the dust limit (as the router can always overpay to meet the dust limit).
1798 let mut remaining_msat_below_dust_exposure_limit = None;
1799 let mut dust_exposure_dust_limit_msat = 0;
1800 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1802 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1803 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1805 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1806 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1807 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1809 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1810 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1811 remaining_msat_below_dust_exposure_limit =
1812 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1813 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1816 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1817 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1818 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1819 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1820 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1821 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1824 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1825 if available_capacity_msat < dust_exposure_dust_limit_msat {
1826 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1828 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1832 available_capacity_msat = cmp::min(available_capacity_msat,
1833 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1835 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1836 available_capacity_msat = 0;
1840 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1841 - context.value_to_self_msat as i64
1842 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1843 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1845 outbound_capacity_msat,
1846 next_outbound_htlc_limit_msat: available_capacity_msat,
1847 next_outbound_htlc_minimum_msat,
1852 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1853 let context = &self;
1854 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1857 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1858 /// number of pending HTLCs that are on track to be in our next commitment tx.
1860 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1861 /// `fee_spike_buffer_htlc` is `Some`.
1863 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1864 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1866 /// Dust HTLCs are excluded.
1867 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1868 let context = &self;
1869 assert!(context.is_outbound());
1871 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1874 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1875 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1877 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1878 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1880 let mut addl_htlcs = 0;
1881 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1883 HTLCInitiator::LocalOffered => {
1884 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1888 HTLCInitiator::RemoteOffered => {
1889 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1895 let mut included_htlcs = 0;
1896 for ref htlc in context.pending_inbound_htlcs.iter() {
1897 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1900 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1901 // transaction including this HTLC if it times out before they RAA.
1902 included_htlcs += 1;
1905 for ref htlc in context.pending_outbound_htlcs.iter() {
1906 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1910 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1911 OutboundHTLCState::Committed => included_htlcs += 1,
1912 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1913 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1914 // transaction won't be generated until they send us their next RAA, which will mean
1915 // dropping any HTLCs in this state.
1920 for htlc in context.holding_cell_htlc_updates.iter() {
1922 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1923 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1928 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1929 // ack we're guaranteed to never include them in commitment txs anymore.
1933 let num_htlcs = included_htlcs + addl_htlcs;
1934 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
1935 #[cfg(any(test, fuzzing))]
1938 if fee_spike_buffer_htlc.is_some() {
1939 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1941 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1942 + context.holding_cell_htlc_updates.len();
1943 let commitment_tx_info = CommitmentTxInfoCached {
1945 total_pending_htlcs,
1946 next_holder_htlc_id: match htlc.origin {
1947 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1948 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1950 next_counterparty_htlc_id: match htlc.origin {
1951 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1952 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1954 feerate: context.feerate_per_kw,
1956 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1961 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1962 /// pending HTLCs that are on track to be in their next commitment tx
1964 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1965 /// `fee_spike_buffer_htlc` is `Some`.
1967 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1968 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1970 /// Dust HTLCs are excluded.
1971 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1972 let context = &self;
1973 assert!(!context.is_outbound());
1975 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1978 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1979 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1981 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1982 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1984 let mut addl_htlcs = 0;
1985 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1987 HTLCInitiator::LocalOffered => {
1988 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1992 HTLCInitiator::RemoteOffered => {
1993 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1999 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2000 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
2001 // a subset of outbound HTLCs, see below.
2002 let mut included_htlcs = 0;
2003 for ref htlc in context.pending_inbound_htlcs.iter() {
2004 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2007 included_htlcs += 1;
2010 for ref htlc in context.pending_outbound_htlcs.iter() {
2011 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2014 // We only include outbound HTLCs which will be present in their next commitment_signed,
2015 // i.e. those whose removal is not merely awaiting a pending revocation.
2017 OutboundHTLCState::Committed => included_htlcs += 1,
2018 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2019 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2024 let num_htlcs = included_htlcs + addl_htlcs;
2025 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2026 #[cfg(any(test, fuzzing))]
2029 if fee_spike_buffer_htlc.is_some() {
2030 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2032 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2033 let commitment_tx_info = CommitmentTxInfoCached {
2035 total_pending_htlcs,
2036 next_holder_htlc_id: match htlc.origin {
2037 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2038 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2040 next_counterparty_htlc_id: match htlc.origin {
2041 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2042 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2044 feerate: context.feerate_per_kw,
2046 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2051 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2052 where F: Fn() -> Option<O> {
2053 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2054 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2061 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2063 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2064 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2067 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2069 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2070 self.if_unbroadcasted_funding(||
2071 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2075 /// Returns whether the channel is funded in a batch.
2076 pub fn is_batch_funding(&self) -> bool {
2077 self.is_batch_funding.is_some()
2080 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2082 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2083 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2086 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2087 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2088 /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
2089 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2090 /// immediately (others we will have to allow to time out).
2091 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2092 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2093 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2094 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2095 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2096 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2098 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2099 // return them to fail the payment.
2100 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2101 let counterparty_node_id = self.get_counterparty_node_id();
2102 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2104 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2105 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2110 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2111 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2112 // returning a channel monitor update here would imply a channel monitor update before
2113 // we even registered the channel monitor to begin with, which is invalid.
2114 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2115 // funding transaction, don't return a funding txo (which prevents providing the
2116 // monitor update to the user, even if we return one).
2117 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2118 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2119 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2120 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2121 update_id: self.latest_monitor_update_id,
2122 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2126 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2128 self.channel_state = ChannelState::ShutdownComplete as u32;
2129 self.update_time_counter += 1;
2132 dropped_outbound_htlcs,
2133 unbroadcasted_batch_funding_txid,
2137 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2138 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2139 let counterparty_keys = self.build_remote_transaction_keys();
2140 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2141 let signature = match &self.holder_signer {
2142 // TODO (taproot|arik): move match into calling method for Taproot
2143 ChannelSignerType::Ecdsa(ecdsa) => {
2144 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2145 .map(|(sig, _)| sig).ok()?
2147 // TODO (taproot|arik)
2152 if self.signer_pending_funding {
2153 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2154 self.signer_pending_funding = false;
2157 Some(msgs::FundingCreated {
2158 temporary_channel_id: self.temporary_channel_id.unwrap(),
2159 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2160 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2163 partial_signature_with_nonce: None,
2165 next_local_nonce: None,
2169 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2170 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2171 let counterparty_keys = self.build_remote_transaction_keys();
2172 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2174 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2175 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2176 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2177 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2179 match &self.holder_signer {
2180 // TODO (arik): move match into calling method for Taproot
2181 ChannelSignerType::Ecdsa(ecdsa) => {
2182 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2183 .map(|(signature, _)| msgs::FundingSigned {
2184 channel_id: self.channel_id(),
2187 partial_signature_with_nonce: None,
2191 if funding_signed.is_none() {
2192 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2193 self.signer_pending_funding = true;
2194 } else if self.signer_pending_funding {
2195 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2196 self.signer_pending_funding = false;
2199 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2200 (counterparty_initial_commitment_tx, funding_signed)
2202 // TODO (taproot|arik)
2209 // Internal utility functions for channels
2211 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2212 /// `channel_value_satoshis` in msat, set through
2213 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2215 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2217 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2218 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2219 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2221 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2224 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2226 channel_value_satoshis * 10 * configured_percent
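// Illustrative example, not upstream LDK code: the `* 10` above folds together
// `* 1000` (sats to msat) and `/ 100` (percent to fraction).
#[cfg(test)]
fn example_max_in_flight_from_percent() {
	let channel_value_satoshis: u64 = 1_000_000; // assumed channel size
	let configured_percent: u64 = 10; // assumed 10% cap
	// 10% of 1,000,000 sats is 100,000 sats, i.e. 100,000,000 msat.
	assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000);
}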
2229 /// Returns a minimum channel reserve value the remote needs to maintain,
2230 /// required by us according to the configured or default
2231 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2233 /// Guaranteed to return a value no larger than channel_value_satoshis
2235 /// This is used both for outbound and inbound channels and has a lower bound
2236 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2237 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2238 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2239 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
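// Illustrative example, not upstream LDK code: with an assumed reserve setting of
// 10,000 ppm (1%), a 1,000,000-sat channel yields a 10,000-sat reserve; smaller
// channels are floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS but never exceed the
// channel value itself.
#[cfg(test)]
fn example_holder_selected_reserve() {
	let mut config = UserConfig::default();
	config.channel_handshake_config.their_channel_reserve_proportional_millionths = 10_000;
	assert_eq!(get_holder_selected_channel_reserve_satoshis(1_000_000, &config), 10_000);
}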
2242 /// This is for legacy reasons, present for forward-compatibility.
2243 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2244 /// from storage. Hence, we use this function to not persist default values of
2245 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2246 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2247 let (q, _) = channel_value_satoshis.overflowing_div(100);
2248 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
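// Illustrative example, not upstream LDK code: the legacy default is 1% of the
// channel value, floored at 1000 sats and capped at the full channel value.
#[cfg(test)]
fn example_legacy_default_reserve() {
	assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(1_000_000), 10_000);
	assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(50_000), 1_000); // floored
	assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(500), 500); // capped at value
}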
2251 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2252 // Note that num_htlcs should not include dust HTLCs.
2254 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2255 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2258 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2259 // Note that num_htlcs should not include dust HTLCs.
2260 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2261 // Note that we need to divide before multiplying to round properly,
2262 // since the lowest denomination of bitcoin on-chain is the satoshi.
2263 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
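// Illustrative example, not upstream LDK code: the divide-then-multiply above floors
// the fee to a whole satoshi expressed in msat. Assuming the non-anchor base weight of
// 724 WU and 172 WU per HTLC, at 253 sat/KW with one non-dust HTLC:
// (724 + 172) * 253 = 226,688 -> 226 sats -> 226,000 msat.
#[cfg(test)]
fn example_commit_tx_fee_msat_rounding() {
	let features = ChannelTypeFeatures::only_static_remote_key(); // a non-anchor channel type
	let fee_msat = commit_tx_fee_msat(253, 1, &features);
	assert_eq!(fee_msat % 1000, 0); // always a whole number of sats
	assert_eq!(fee_msat, 226_000);
}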
2266 // Holder designates channel data owned for the benefit of the user client.
2267 // Counterparty designates channel data owned by the other channel participant.
2268 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2269 pub context: ChannelContext<SP>,
2272 #[cfg(any(test, fuzzing))]
2273 struct CommitmentTxInfoCached {
2275 total_pending_htlcs: usize,
2276 next_holder_htlc_id: u64,
2277 next_counterparty_htlc_id: u64,
2281 impl<SP: Deref> Channel<SP> where
2282 SP::Target: SignerProvider,
2283 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2285 fn check_remote_fee<F: Deref, L: Deref>(
2286 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2287 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2288 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2290 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2291 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2293 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2295 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2296 if feerate_per_kw < lower_limit {
2297 if let Some(cur_feerate) = cur_feerate_per_kw {
2298 if feerate_per_kw > cur_feerate {
2300 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2301 cur_feerate, feerate_per_kw);
2305 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2311 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2312 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2313 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2314 // outside of those situations will fail.
2315 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2319 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2324 1 + // script length (0)
2328 )*4 + // * 4 for non-witness parts
2329 2 + // witness marker and flag
2330 1 + // witness element count
2331 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2332 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2333 2*(1 + 71); // two signatures + sighash type flags
2334 if let Some(spk) = a_scriptpubkey {
2335 ret += ((8+1) + // output values and script length
2336 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2338 if let Some(spk) = b_scriptpubkey {
2339 ret += ((8+1) + // output values and script length
2340 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
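// Worked example, illustrative only: a standard P2WPKH scriptpubkey is 22 bytes, so
// each present output adds (8 + 1 + 22) * 4 = 124 weight units on top of the shared
// base computed above.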
2346 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2347 assert!(self.context.pending_inbound_htlcs.is_empty());
2348 assert!(self.context.pending_outbound_htlcs.is_empty());
2349 assert!(self.context.pending_update_fee.is_none());
2351 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2352 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2353 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2355 if value_to_holder < 0 {
2356 assert!(self.context.is_outbound());
2357 total_fee_satoshis += (-value_to_holder) as u64;
2358 } else if value_to_counterparty < 0 {
2359 assert!(!self.context.is_outbound());
2360 total_fee_satoshis += (-value_to_counterparty) as u64;
2363 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2364 value_to_counterparty = 0;
2367 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2368 value_to_holder = 0;
2371 assert!(self.context.shutdown_scriptpubkey.is_some());
2372 let holder_shutdown_script = self.get_closing_scriptpubkey();
2373 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2374 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2376 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2377 (closing_transaction, total_fee_satoshis)
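// Worked example, illustrative only: on a 100,000-sat channel where we funded and
// hold 60,000 sats, a proposed total fee of 1,000 sats comes entirely out of our
// output: 59,000 sats to us, 40,000 to them. Had either output fallen at or below
// our dust limit it would be dropped entirely rather than reduced.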
2380 fn funding_outpoint(&self) -> OutPoint {
2381 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2384 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] entirely.
2387 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2388 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2390 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
2392 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2393 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2394 where L::Target: Logger {
2395 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2396 // (see equivalent if condition there).
2397 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2398 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2399 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2400 self.context.latest_monitor_update_id = mon_update_id;
2401 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2402 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
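// Illustrative note, not upstream text: saving and restoring latest_monitor_update_id
// around the call above is what "drops" the monitor update -- the claim can only land
// in the holding cell here, so no update ID may be consumed by it.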
2406 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2407 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2408 // caller thought we could have something claimed (cause we wouldn't have accepted an
2409 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2411 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2412 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2414 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2416 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2417 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2418 // these, but for now we just have to treat them as normal.
2420 let mut pending_idx = core::usize::MAX;
2421 let mut htlc_value_msat = 0;
2422 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2423 if htlc.htlc_id == htlc_id_arg {
2424 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2425 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2426 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2428 InboundHTLCState::Committed => {},
2429 InboundHTLCState::LocalRemoved(ref reason) => {
2430 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2432 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2433 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2435 return UpdateFulfillFetch::DuplicateClaim {};
2438 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2439 // Don't return in release mode here so that we can update channel_monitor
2443 htlc_value_msat = htlc.amount_msat;
2447 if pending_idx == core::usize::MAX {
2448 #[cfg(any(test, fuzzing))]
2449 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2450 // this is simply a duplicate claim, not previously failed and we lost funds.
2451 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2452 return UpdateFulfillFetch::DuplicateClaim {};
2455 // Now update local state:
2457 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2458 // can claim it even if the channel hits the chain before we see their next commitment.
2459 self.context.latest_monitor_update_id += 1;
2460 let monitor_update = ChannelMonitorUpdate {
2461 update_id: self.context.latest_monitor_update_id,
2462 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2463 payment_preimage: payment_preimage_arg.clone(),
2467 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2468 // Note that this condition is the same as the assertion in
2469 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2470 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
// do not get into this branch.
2472 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2473 match pending_update {
2474 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2475 if htlc_id_arg == htlc_id {
2476 // Make sure we don't leave latest_monitor_update_id incremented here:
2477 self.context.latest_monitor_update_id -= 1;
2478 #[cfg(any(test, fuzzing))]
2479 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2480 return UpdateFulfillFetch::DuplicateClaim {};
2483 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2484 if htlc_id_arg == htlc_id {
2485 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2486 // TODO: We may actually be able to switch to a fulfill here, though its
2487 // rare enough it may not be worth the complexity burden.
2488 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2489 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2495 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2496 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2497 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2499 #[cfg(any(test, fuzzing))]
2500 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2501 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2503 #[cfg(any(test, fuzzing))]
2504 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2507 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
if let InboundHTLCState::Committed = htlc.state {
} else {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
2513 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2514 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
UpdateFulfillFetch::NewClaim {
monitor_update,
htlc_value_msat,
2520 msg: Some(msgs::UpdateFulfillHTLC {
2521 channel_id: self.context.channel_id(),
2522 htlc_id: htlc_id_arg,
2523 payment_preimage: payment_preimage_arg,
2528 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2529 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2530 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2531 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2532 // Even if we aren't supposed to let new monitor updates with commitment state
2533 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2534 // matter what. Sadly, to push a new monitor update which flies before others
2535 // already queued, we have to insert it into the pending queue and update the
2536 // update_ids of all the following monitors.
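// Worked example of the renumbering below (update ids assumed for illustration): if the
// blocked queue currently holds updates with ids [5, 6] and this claim would otherwise
// take id 7, it instead takes id 5 (the first blocked id) while the held updates are
// bumped to [6, 7], so ids remain strictly increasing when they are eventually released.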
2537 if release_cs_monitor && msg.is_some() {
2538 let mut additional_update = self.build_commitment_no_status_check(logger);
// build_commitment_no_status_check may bump latest_monitor_update_id but we want them
// to be strictly increasing by one, so decrement it here.
2541 self.context.latest_monitor_update_id = monitor_update.update_id;
2542 monitor_update.updates.append(&mut additional_update.updates);
} else {
let new_mon_id = self.context.blocked_monitor_updates.get(0)
2545 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2546 monitor_update.update_id = new_mon_id;
2547 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2548 held_update.update.update_id += 1;
if msg.is_some() {
debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2552 let update = self.build_commitment_no_status_check(logger);
2553 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2559 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2560 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2562 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
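// A minimal caller sketch for the method above (hypothetical caller code, not taken
// from ChannelManager):
//
//     match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage, &logger) {
//         UpdateFulfillCommitFetch::NewClaim { monitor_update, .. } => {
//             // Persist `monitor_update` before acting on any generated messages.
//         },
//         UpdateFulfillCommitFetch::DuplicateClaim {} => {}, // already claimed, nothing to do
//     }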
2566 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2567 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2568 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2569 /// before we fail backwards.
2571 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2572 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2573 /// [`ChannelError::Ignore`].
2574 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2575 -> Result<(), ChannelError> where L::Target: Logger {
2576 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2577 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
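// Minimal usage sketch (hypothetical caller, assuming the preconditions in the docs):
//
//     if let Err(ChannelError::Ignore(_msg)) = chan.queue_fail_htlc(htlc_id, err_packet, &logger) {
//         // Safe to ignore per the docs above; the failure was a duplicate.
//     }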
2580 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2581 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2582 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2583 /// before we fail backwards.
2585 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2586 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2587 /// [`ChannelError::Ignore`].
2588 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2589 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2590 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2591 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2593 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2595 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2597 // these, but for now we just have to treat them as normal.
2599 let mut pending_idx = core::usize::MAX;
2600 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2601 if htlc.htlc_id == htlc_id_arg {
match htlc.state {
InboundHTLCState::Committed => {},
2604 InboundHTLCState::LocalRemoved(ref reason) => {
if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
} else {
debug_assert!(false, "Tried to fail an HTLC that was already failed");
}
},
_ => {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
}
}
pending_idx = idx;
2619 if pending_idx == core::usize::MAX {
2620 #[cfg(any(test, fuzzing))]
2621 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2622 // is simply a duplicate fail, not previously failed and we failed-back too early.
debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
return Ok(None);
}
2627 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2628 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2629 force_holding_cell = true;
2632 // Now update local state:
2633 if force_holding_cell {
2634 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2635 match pending_update {
2636 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2637 if htlc_id_arg == htlc_id {
2638 #[cfg(any(test, fuzzing))]
2639 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2643 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2644 if htlc_id_arg == htlc_id {
2645 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2646 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2652 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2653 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2654 htlc_id: htlc_id_arg,
2660 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2662 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2663 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2666 Ok(Some(msgs::UpdateFailHTLC {
2667 channel_id: self.context.channel_id(),
2668 htlc_id: htlc_id_arg,
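// To summarize the outcomes above: `Ok(Some(update_fail_htlc))` when the failure can be
// sent immediately, `Ok(None)` when it was placed in the holding cell (or was a
// duplicate), and `Err(ChannelError::Ignore(_))` when no matching inbound HTLC exists.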
2673 // Message handlers:
2675 /// Handles a funding_signed message from the remote end.
2676 /// If this call is successful, broadcast the funding transaction (and not before!)
2677 pub fn funding_signed<L: Deref>(
2678 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2679 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2683 if !self.context.is_outbound() {
2684 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2686 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2687 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2689 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2690 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2691 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2692 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2695 let funding_script = self.context.get_funding_redeemscript();
2697 let counterparty_keys = self.context.build_remote_transaction_keys();
2698 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2699 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2700 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2702 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2703 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2705 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2706 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2708 let trusted_tx = initial_commitment_tx.trust();
2709 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2710 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2711 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2712 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2713 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2717 let holder_commitment_tx = HolderCommitmentTransaction::new(
2718 initial_commitment_tx,
2721 &self.context.get_holder_pubkeys().funding_pubkey,
2722 self.context.counterparty_funding_pubkey()
2725 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2726 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2729 let funding_redeemscript = self.context.get_funding_redeemscript();
2730 let funding_txo = self.context.get_funding_txo().unwrap();
2731 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2732 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2733 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2734 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2735 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2736 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2737 shutdown_script, self.context.get_holder_selected_contest_delay(),
2738 &self.context.destination_script, (funding_txo, funding_txo_script),
2739 &self.context.channel_transaction_parameters,
2740 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2742 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2744 channel_monitor.provide_initial_counterparty_commitment_tx(
2745 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2746 self.context.cur_counterparty_commitment_transaction_number,
2747 self.context.counterparty_cur_commitment_point.unwrap(),
2748 counterparty_initial_commitment_tx.feerate_per_kw(),
2749 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2750 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not yet set up any monitor(s), so no monitor update could have failed!
2753 if self.context.is_batch_funding() {
2754 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
} else {
self.context.channel_state = ChannelState::FundingSent as u32;
}
2758 self.context.cur_holder_commitment_transaction_number -= 1;
2759 self.context.cur_counterparty_commitment_transaction_number -= 1;
2761 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2763 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2764 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
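// Sketch of the caller sequence implied by the docs above (illustrative only):
//
//     let monitor = chan.funding_signed(&msg, best_block, &signer_provider, &logger)?;
//     // 1) persist `monitor`; 2) only then broadcast the funding transaction.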
2768 /// Updates the state of the channel to indicate that all channels in the batch have received
2769 /// funding_signed and persisted their monitors.
2770 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2771 /// treated as a non-batch channel going forward.
2772 pub fn set_batch_ready(&mut self) {
2773 self.context.is_batch_funding = None;
2774 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
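// `&= !flag` clears exactly one bit while preserving the rest, e.g. with placeholder
// values (not the real `ChannelState` bits):
//
//     let mut state: u32 = 0b0110;
//     state &= !0b0100;
//     assert_eq!(state, 0b0010);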
2777 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2778 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2780 pub fn channel_ready<NS: Deref, L: Deref>(
2781 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2782 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2783 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2785 NS::Target: NodeSigner,
2788 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2789 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2790 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2793 if let Some(scid_alias) = msg.short_channel_id_alias {
2794 if Some(scid_alias) != self.context.short_channel_id {
2795 // The scid alias provided can be used to route payments *from* our counterparty,
2796 // i.e. can be used for inbound payments and provided in invoices, but is not used
2797 // when routing outbound payments.
2798 self.context.latest_inbound_scid_alias = Some(scid_alias);
2802 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2804 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2805 // batch, but we can receive channel_ready messages.
debug_assert!(
non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
);
2810 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2811 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2812 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2813 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2814 self.context.update_time_counter += 1;
2815 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2816 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2817 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2818 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2820 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2821 // required, or they're sending a fresh SCID alias.
2822 let expected_point =
2823 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
// If they haven't ever sent an updated point, the point they send should match
// the current one.
2826 self.context.counterparty_cur_commitment_point
2827 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2828 // If we've advanced the commitment number once, the second commitment point is
2829 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2830 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2831 self.context.counterparty_prev_commitment_point
} else {
// If they have sent updated points, channel_ready is always supposed to match
2834 // their "first" point, which we re-derive here.
2835 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2836 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2837 ).expect("We already advanced, so previous secret keys should have been validated already")))
2839 if expected_point != Some(msg.next_per_commitment_point) {
2840 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
}
return Ok(None);
} else {
return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
}
2847 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2848 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2850 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2852 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
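// Note on the expected-point logic above: commitment numbers count *down* from
// INITIAL_COMMITMENT_NUMBER, so "number - 1" means one state has been advanced. Once two
// or more states have advanced, the original channel_ready point can be re-derived from
// the revealed per-commitment secret at index INITIAL_COMMITMENT_NUMBER - 1.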
2855 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2856 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2857 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2858 ) -> Result<(), ChannelError>
2859 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2860 FE::Target: FeeEstimator, L::Target: Logger,
2862 // We can't accept HTLCs sent after we've sent a shutdown.
2863 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2864 if local_sent_shutdown {
2865 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2867 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2868 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2869 if remote_sent_shutdown {
2870 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2872 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2873 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2875 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2876 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2878 if msg.amount_msat == 0 {
2879 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2881 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2882 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2885 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2886 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2887 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2888 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2890 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2891 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2894 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2895 // the reserve_satoshis we told them to always have as direct payment so that they lose
2896 // something if we punish them for broadcasting an old state).
2897 // Note that we don't really care about having a small/no to_remote output in our local
2898 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2899 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2900 // present in the next commitment transaction we send them (at least for fulfilled ones,
2901 // failed ones won't modify value_to_self).
2902 // Note that we will send HTLCs which another instance of rust-lightning would think
2903 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
// Channel state once they will not be present in the next received commitment
// transaction).
2906 let mut removed_outbound_total_msat = 0;
2907 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2908 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2909 removed_outbound_total_msat += htlc.amount_msat;
2910 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2911 removed_outbound_total_msat += htlc.amount_msat;
2915 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(0, 0)
} else {
let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
};
2923 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2924 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2925 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2926 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2927 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2928 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2929 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2933 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2934 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2935 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2936 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2937 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2938 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2939 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
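// Worked example of the dust math above (all numbers assumed for illustration): with a
// dust buffer feerate of 2_500 sat/kW, an HTLC-timeout weight of 663, and a counterparty
// dust limit of 546 sats, the timeout threshold is 2_500 * 663 / 1000 + 546 = 2_203 sats,
// so an HTLC below ~2_203_000 msat counts its full amount toward dust exposure.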
2943 let pending_value_to_self_msat =
2944 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2945 let pending_remote_value_msat =
2946 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2947 if pending_remote_value_msat < msg.amount_msat {
2948 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2951 // Check that the remote can afford to pay for this HTLC on-chain at the current
2952 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
};
let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
} else {
0
};
2963 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2964 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2966 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2967 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
} else {
0
};
2976 if !self.context.is_outbound() {
// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
// the spec because the fee spike buffer requirement doesn't exist on the receiver's
// side, only on the sender's. Note that with anchor outputs we are no longer as
// sensitive to fee spikes, so we need to account for them less severely.
2981 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2982 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2983 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2984 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2986 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
2987 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2988 // the HTLC, i.e. its status is already set to failing.
2989 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
2990 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2993 // Check that they won't violate our local required channel reserve by adding this HTLC.
2994 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2995 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2996 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
2997 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
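// Worked example for the reserve check above (numbers assumed): with a
// counterparty-selected reserve of 1_000 sats (1_000_000 msat), a projected local commit
// tx fee of 183_000 msat, and no anchors, we can only accept the HTLC if
// value_to_self_msat >= 1_000_000 + 183_000 msat at the resulting state.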
3000 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3001 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3003 if msg.cltv_expiry >= 500000000 {
3004 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3007 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3008 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3009 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3013 // Now update local state:
3014 self.context.next_counterparty_htlc_id += 1;
3015 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3016 htlc_id: msg.htlc_id,
3017 amount_msat: msg.amount_msat,
3018 payment_hash: msg.payment_hash,
3019 cltv_expiry: msg.cltv_expiry,
3020 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
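// For orientation: a freshly-added inbound HTLC then walks the state machine driven by
// `commitment_signed`/`revoke_and_ack` below, i.e. RemoteAnnounced ->
// AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke -> Committed, and
// finally LocalRemoved once we fulfill or fail it back.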
/// Marks an outbound HTLC as removed once we have received an update_fail/fulfill/malformed message for it.
3027 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3028 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3029 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3030 if htlc.htlc_id == htlc_id {
3031 let outcome = match check_preimage {
3032 None => fail_reason.into(),
3033 Some(payment_preimage) => {
3034 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3035 if payment_hash != htlc.payment_hash {
3036 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3038 OutboundHTLCOutcome::Success(Some(payment_preimage))
match htlc.state {
OutboundHTLCState::LocalAnnounced(_) =>
3043 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3044 OutboundHTLCState::Committed => {
3045 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3047 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3048 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3053 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3056 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3057 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3058 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3060 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3061 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3064 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3067 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3068 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3069 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3071 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3072 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3075 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3079 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3080 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3081 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3083 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3084 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3087 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3091 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3092 where L::Target: Logger
3094 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3095 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3097 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3098 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3100 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3101 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3104 let funding_script = self.context.get_funding_redeemscript();
3106 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3108 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3109 let commitment_txid = {
3110 let trusted_tx = commitment_stats.tx.trust();
3111 let bitcoin_tx = trusted_tx.built_transaction();
3112 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3114 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3115 log_bytes!(msg.signature.serialize_compact()[..]),
3116 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3117 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3118 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3119 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3123 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3125 // If our counterparty updated the channel fee in this commitment transaction, check that
3126 // they can actually afford the new fee now.
3127 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
update_state == FeeUpdateState::RemoteAnnounced
} else { false };
if update_fee {
3131 debug_assert!(!self.context.is_outbound());
3132 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3133 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3134 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3137 #[cfg(any(test, fuzzing))]
3139 if self.context.is_outbound() {
3140 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3141 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3142 if let Some(info) = projected_commit_tx_info {
3143 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3144 + self.context.holding_cell_htlc_updates.len();
3145 if info.total_pending_htlcs == total_pending_htlcs
3146 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3147 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3148 && info.feerate == self.context.feerate_per_kw {
3149 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3155 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3156 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3159 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3160 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3161 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3162 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3163 // backwards compatibility, we never use it in production. To provide test coverage, here,
3164 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3165 #[allow(unused_assignments, unused_mut)]
3166 let mut separate_nondust_htlc_sources = false;
3167 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3168 use core::hash::{BuildHasher, Hasher};
3169 // Get a random value using the only std API to do so - the DefaultHasher
3170 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3171 separate_nondust_htlc_sources = rand_val % 2 == 0;
3174 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3175 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3176 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3177 if let Some(_) = htlc.transaction_output_index {
3178 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3179 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3180 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3182 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3183 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3184 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3185 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3186 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3187 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3188 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3189 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3191 if !separate_nondust_htlc_sources {
3192 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
}
} else {
htlcs_and_sigs.push((htlc, None, source_opt.take()));
}
3197 if separate_nondust_htlc_sources {
3198 if let Some(source) = source_opt.take() {
3199 nondust_htlc_sources.push(source);
3202 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3205 let holder_commitment_tx = HolderCommitmentTransaction::new(
3206 commitment_stats.tx,
3208 msg.htlc_signatures.clone(),
3209 &self.context.get_holder_pubkeys().funding_pubkey,
3210 self.context.counterparty_funding_pubkey()
3213 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3214 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3216 // Update state now that we've passed all the can-fail calls...
3217 let mut need_commitment = false;
3218 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3219 if *update_state == FeeUpdateState::RemoteAnnounced {
3220 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3221 need_commitment = true;
3225 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3226 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
Some(forward_info.clone())
} else { None };
3229 if let Some(forward_info) = new_forward {
3230 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3231 &htlc.payment_hash, &self.context.channel_id);
3232 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3233 need_commitment = true;
3236 let mut claimed_htlcs = Vec::new();
3237 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3238 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3239 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3240 &htlc.payment_hash, &self.context.channel_id);
3241 // Grab the preimage, if it exists, instead of cloning
3242 let mut reason = OutboundHTLCOutcome::Success(None);
3243 mem::swap(outcome, &mut reason);
3244 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3245 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3246 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3247 // have a `Success(None)` reason. In this case we could forget some HTLC
3248 // claims, but such an upgrade is unlikely and including claimed HTLCs here
// fixes a bug which the user was exposed to on 0.0.104 when they started the
// claim process.
3251 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3253 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3254 need_commitment = true;
3258 self.context.latest_monitor_update_id += 1;
3259 let mut monitor_update = ChannelMonitorUpdate {
3260 update_id: self.context.latest_monitor_update_id,
3261 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3262 commitment_tx: holder_commitment_tx,
3263 htlc_outputs: htlcs_and_sigs,
3265 nondust_htlc_sources,
3269 self.context.cur_holder_commitment_transaction_number -= 1;
3270 self.context.expecting_peer_commitment_signed = false;
3271 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3272 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3273 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3275 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3276 // In case we initially failed monitor updating without requiring a response, we need
3277 // to make sure the RAA gets sent first.
3278 self.context.monitor_pending_revoke_and_ack = true;
3279 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3280 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3281 // the corresponding HTLC status updates so that
3282 // get_last_commitment_update_for_send includes the right HTLCs.
3283 self.context.monitor_pending_commitment_signed = true;
3284 let mut additional_update = self.build_commitment_no_status_check(logger);
// build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
// strictly increasing by one, so decrement it here.
3287 self.context.latest_monitor_update_id = monitor_update.update_id;
3288 monitor_update.updates.append(&mut additional_update.updates);
3290 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3291 &self.context.channel_id);
3292 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3295 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3296 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3297 // we'll send one right away when we get the revoke_and_ack when we
3298 // free_holding_cell_htlcs().
3299 let mut additional_update = self.build_commitment_no_status_check(logger);
// build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
// strictly increasing by one, so decrement it here.
3302 self.context.latest_monitor_update_id = monitor_update.update_id;
3303 monitor_update.updates.append(&mut additional_update.updates);
3307 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3308 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3309 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3310 return Ok(self.push_ret_blockable_mon_update(monitor_update));
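// In short: a valid commitment_signed always yields a monitor update carrying the new
// holder commitment; we answer with a revoke_and_ack (plus our own commitment_signed if
// HTLC/fee state changes require a new remote commitment), unless a monitor update is
// already in flight, in which case both replies are deferred until it completes.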
3313 /// Public version of the below, checking relevant preconditions first.
3314 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3315 /// returns `(None, Vec::new())`.
3316 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3317 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3318 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3319 where F::Target: FeeEstimator, L::Target: Logger
3321 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3322 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3323 self.free_holding_cell_htlcs(fee_estimator, logger)
3324 } else { (None, Vec::new()) }
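// Minimal caller sketch (hypothetical): a flush attempt after handling revoke_and_ack,
// failing back any HTLCs which could not be re-added:
//
//     let (monitor_update_opt, failed_htlcs) =
//         chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);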
3327 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3328 /// for our counterparty.
3329 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3330 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3331 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3332 where F::Target: FeeEstimator, L::Target: Logger
3334 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3335 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3336 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3337 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3339 let mut monitor_update = ChannelMonitorUpdate {
3340 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3341 updates: Vec::new(),
3344 let mut htlc_updates = Vec::new();
3345 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3346 let mut update_add_count = 0;
3347 let mut update_fulfill_count = 0;
3348 let mut update_fail_count = 0;
3349 let mut htlcs_to_fail = Vec::new();
3350 for htlc_update in htlc_updates.drain(..) {
3351 // Note that this *can* fail, though it should be due to rather-rare conditions on
3352 // fee races with adding too many outputs which push our total payments just over
3353 // the limit. In case it's less rare than I anticipate, we may want to revisit
3354 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3355 // to rebalance channels.
3356 match &htlc_update {
3357 &HTLCUpdateAwaitingACK::AddHTLC {
3358 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3359 skimmed_fee_msat, ..
3361 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3362 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3364 Ok(_) => update_add_count += 1,
3367 ChannelError::Ignore(ref msg) => {
3368 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3369 // If we fail to send here, then this HTLC should
3370 // be failed backwards. Failing to send here
3371 // indicates that this HTLC may keep being put back
3372 // into the holding cell without ever being
3373 // successfully forwarded/failed/fulfilled, causing
3374 // our counterparty to eventually close on us.
3375 htlcs_to_fail.push((source.clone(), *payment_hash));
3378 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3384 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
// If an HTLC claim was previously added to the holding cell (via
// `get_update_fulfill_htlc`), then generating the claim message itself must
3387 // not fail - any in between attempts to claim the HTLC will have resulted
3388 // in it hitting the holding cell again and we cannot change the state of a
3389 // holding cell HTLC from fulfill to anything else.
3390 let mut additional_monitor_update =
3391 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3392 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3393 { monitor_update } else { unreachable!() };
3394 update_fulfill_count += 1;
3395 monitor_update.updates.append(&mut additional_monitor_update.updates);
3397 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3398 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3399 Ok(update_fail_msg_option) => {
3400 // If an HTLC failure was previously added to the holding cell (via
3401 // `queue_fail_htlc`) then generating the fail message itself must
3402 // not fail - we should never end up in a state where we double-fail
3403 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3404 // for a full revocation before failing.
3405 debug_assert!(update_fail_msg_option.is_some());
3406 update_fail_count += 1;
if let ChannelError::Ignore(_) = e {}
else {
panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
}
3418 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3419 return (None, htlcs_to_fail);
3421 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
self.send_update_fee(feerate, false, fee_estimator, logger)
} else {
None
};
3427 let mut additional_update = self.build_commitment_no_status_check(logger);
// build_commitment_no_status_check and get_update_fulfill_htlc may bump
// latest_monitor_update_id, but we want them to be strictly increasing by one, so reset it here.
3430 self.context.latest_monitor_update_id = monitor_update.update_id;
3431 monitor_update.updates.append(&mut additional_update.updates);
3433 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3434 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3435 update_add_count, update_fulfill_count, update_fail_count);
3437 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3438 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3444 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3445 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3446 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3447 /// generating an appropriate error *after* the channel state has been updated based on the
3448 /// revoke_and_ack message.
3449 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3450 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3451 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3452 where F::Target: FeeEstimator, L::Target: Logger,
3454 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3455 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3457 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3458 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3460 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3461 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3464 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3466 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3467 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3468 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3472 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3473 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3474 // haven't given them a new commitment transaction to broadcast). We should probably
3475 // take advantage of this by updating our channel monitor, sending them an error, and
// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3477 // lot of work, and there's some chance this is all a misunderstanding anyway.
3478 // We have to do *something*, though, since our signer may get mad at us for otherwise
3479 // jumping a remote commitment number, so best to just force-close and move on.
3480 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3483 #[cfg(any(test, fuzzing))]
3485 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3486 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3489 match &self.context.holder_signer {
3490 ChannelSignerType::Ecdsa(ecdsa) => {
3491 ecdsa.validate_counterparty_revocation(
3492 self.context.cur_counterparty_commitment_transaction_number + 1,
3494 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3496 // TODO (taproot|arik)
3501 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3502 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3503 self.context.latest_monitor_update_id += 1;
3504 let mut monitor_update = ChannelMonitorUpdate {
3505 update_id: self.context.latest_monitor_update_id,
3506 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3507 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3508 secret: msg.per_commitment_secret,
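		// Handing the revoked secret to the ChannelMonitor (via the update above) is what lets
		// the monitor construct punishment claims if the counterparty ever broadcasts this
		// now-revoked commitment transaction.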

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.
		self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
			let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;

			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					*expecting_peer_commitment_signed = true;
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
					*expecting_peer_commitment_signed = true;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
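		// Note that value_to_self_msat_diff may be negative (our outbound HTLCs being fulfilled
		// moves value to the counterparty), so the balance is adjusted once here using signed
		// arithmetic.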

		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
					self.context.expecting_peer_commitment_signed = true;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call
				// get_last_commitment_update_for_send(), which does not update state, but we're
				// definitely now awaiting a remote revoke before we can step forward any more, so
				// set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
	/// Queues up an outbound update fee by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
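	///
	/// Illustrative sketch only (not from the original source):
	///
	/// ```ignore
	/// // Queue the feerate change; no update_fee message is generated yet.
	/// chan.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
	/// // Freeing the holding cell later yields the update_fee plus commitment_signed.
	/// ```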
	pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
		assert!(msg_opt.is_none(), "We forced holding cell?");
	}
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on the optionness of the return value.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
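		// The buffer prices the commitment transaction as though CONCURRENT_INBOUND_HTLC_FEE_BUFFER
		// additional (non-dust) HTLCs were already present, so a burst of concurrently-arriving
		// inbound HTLCs can't immediately push us below our reserve at the new feerate. Note the
		// `* 1000` converts the sat-denominated fee into msat for the comparison below.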
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	///
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			return Err(());
		}

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.context.next_counterparty_htlc_id -= inbound_drop_count;
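		// Rewinding next_counterparty_htlc_id is safe because the counterparty will re-send the
		// dropped update_add_htlcs with the same HTLC IDs after reconnecting.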

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
	/// update completes (potentially immediately).
	/// The messages which were generated with the monitor update must *not* have been sent to the
	/// remote end, and must instead have been dropped. They will be regenerated when
	/// [`Self::monitor_updating_restored`] is called.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	/// [`chain::Watch`]: crate::chain::Watch
	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.context.monitor_pending_revoke_and_ack |= resend_raa;
		self.context.monitor_pending_commitment_signed |= resend_commitment;
		self.context.monitor_pending_channel_ready |= resend_channel_ready;
		self.context.monitor_pending_forwards.append(&mut pending_forwards);
		self.context.monitor_pending_failures.append(&mut pending_fails);
		self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
	}
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
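			// The peer is disconnected, so we don't resend the RAA/commitment update here;
			// channel_reestablish handling will regenerate whatever needs resending once the
			// reconnection dance completes.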
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// Check that we won't be pushed over our dust exposure limit by the feerate increase.
		if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
		let commitment_update = if self.context.signer_pending_commitment_update {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
			self.context.get_funding_signed_msg(logger).1
		} else { None };
		let channel_ready = if funding_signed.is_some() {
			self.check_get_channel_ready(0)
		} else { None };
		let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
			self.context.get_funding_created_msg(logger)
		} else { None };

		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
			if commitment_update.is_some() { "a" } else { "no" },
			if funding_signed.is_some() { "a" } else { "no" },
			if funding_created.is_some() { "a" } else { "no" },
			if channel_ready.is_some() { "a" } else { "no" });

		SignerResumeUpdates {
			commitment_update,
			funding_signed,
			funding_created,
			channel_ready,
		}
	}
	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
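		// Per-commitment indices count *down* from INITIAL_COMMITMENT_NUMBER, so the point above
		// commits to our current state while `+ 2` selects the older state whose secret this
		// (re-sent) revoke_and_ack releases.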
		msgs::RevokeAndACK {
			channel_id: self.context.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
	/// Gets the last commitment update for immediate sending to our peer.
	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();

		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
					skimmed_fee_msat: htlc.skimmed_fee_msat,
					blinding_point: None,
				});
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.context.channel_id(),
				feerate_per_kw: self.context.pending_update_fee.unwrap().0,
			})
		} else { None };

		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
			&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
			if self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = false;
			}
			update
		} else {
			if !self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = true;
			}
			return Err(());
		};
		Ok(msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed,
		})
	}
	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}
	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
	pub fn channel_reestablish<L: Deref, NS: Deref>(
		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
	) -> Result<ReestablishResponses, ChannelError>
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
		}

		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
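		// Our commitment counter counts *down* from INITIAL_COMMITMENT_NUMBER, while
		// channel_reestablish counts commitments *up* from zero on the wire; this converts
		// between the two conventions for the comparisons below.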
		if msg.next_remote_commitment_number > 0 {
			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
			}
			if msg.next_remote_commitment_number > our_commitment_transaction {
				macro_rules! log_and_panic {
					($err_msg: expr) => {
						log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
						panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					}
				}
				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number, if yes we send a warning message.
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(ChannelError::Warn(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);

		if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
					self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.context.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				}),
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
				self.context.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			debug_assert!(false, "All values should have been handled in the four cases above");
			return Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
		if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
			self.mark_awaiting_response();
		}
		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };

		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
			}

			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: None,
				order: self.context.resend_order.clone(),
			})
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
			}

			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
				self.context.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None,
					order: self.context.resend_order.clone(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
					order: self.context.resend_order.clone(),
				})
			}
		} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		} else {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		}
	}
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
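		// Feerates are quoted in sats per 1000 weight units, hence the division by 1000: e.g. an
		// illustrative 1_000 sat/kW proposal on a ~700 WU closing transaction yields 700 sats.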
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
			// We always add force_close_avoidance_max_fee_satoshis to our normal
			// feerate-calculated fee, but allow the max to be overridden if we're using a
			// target feerate-calculated fee.
			cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
				proposed_max_feerate as u64 * tx_weight / 1000)
		} else {
			self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
		};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.context.closing_negotiation_ready()
	}
	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.context.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.context.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
		// that closing_negotiation_ready checks this case (as well as a few others).
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
		if self.context.expecting_peer_commitment_signed {
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// to disconnect them.
	fn mark_awaiting_response(&mut self) {
		self.context.sent_message_awaiting_response = Some(0);
	}
	/// Determines whether we should disconnect the counterparty due to not receiving a response
	/// within our expected timeframe.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
		let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
			ticks_elapsed
		} else {
			// Don't disconnect when we're not waiting on a response.
			return false;
		};
		*ticks_elapsed += 1;
		*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	}
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
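		// Per BOLT 3, the signatures must appear in the same order as the corresponding pubkeys
		// in the 2-of-2 funding redeemscript, which sorts the serialized pubkeys
		// lexicographically.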
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};
4663 for outp in closing_tx.trust().built_transaction().output.iter() {
4664 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4665 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4669 assert!(self.context.shutdown_scriptpubkey.is_some());
4670 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4671 if last_fee == msg.fee_satoshis {
4672 let shutdown_result = ShutdownResult {
4673 monitor_update: None,
4674 dropped_outbound_htlcs: Vec::new(),
4675 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4677 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4678 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4679 self.context.update_time_counter += 1;
4680 return Ok((None, Some(tx), Some(shutdown_result)));
		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
						let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
							let shutdown_result = ShutdownResult {
								monitor_update: None,
								dropped_outbound_htlcs: Vec::new(),
								unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
							};
							self.context.channel_state = ChannelState::ShutdownComplete as u32;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							(Some(tx), Some(shutdown_result))
						} else {
							(None, None)
						};

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx, shutdown_result))
					},
					// TODO (taproot|arik)
					#[cfg(taproot)]
					_ => todo!()
				}
			}
		}
		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
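	// Worked example of the fee_range negotiation above (illustrative values): suppose we are
	// the fundee with an acceptable range of [200, 1000] sat and the funder proposes
	// fee_satoshis = 300 with fee_range [250, 5000]. Since the funder pays the closing fee, we
	// counter with min(their_max, our_max) = min(5000, 1000) = 1000 sat, the highest fee in
	// the overlapping range.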
	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
			return Err((
				"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}
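	// Worked example of the fee check above (illustrative numbers): with
	// forwarding_fee_base_msat = 1_000 and forwarding_fee_proportional_millionths = 100,
	// forwarding amt_to_forward = 1_000_000 msat requires a fee of
	// 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat, so htlc.amount_msat must be at least
	// 1_001_100 msat or we fail back with fee_insufficient.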
	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
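	// Note on the offsets above: the `cur_*_commitment_transaction_number` fields count *down*
	// from INITIAL_COMMITMENT_NUMBER (2^48 - 1) and track the commitment transaction currently
	// being built, so "+ 1" points at the most recently exchanged commitment and "+ 2" at the
	// counterparty commitment before that, i.e. the latest one they have revoked.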
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}

	#[cfg(test)]
	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
	}

	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}
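	// For example, if the head of `blocked_monitor_updates` holds update_id 6, every update
	// through id 5 has already been released for persistence, so this returns 6 - 1 = 5.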
	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}
	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if self.context.channel_state &
			!(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
				== ChannelState::FundingSent as u32 {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// FundingSent set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
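	// Illustrative trace of the 0-conf branch above: immediately after `funding_signed`, both
	// commitment transaction numbers have been decremented exactly once from
	// INITIAL_COMMITMENT_NUMBER, so both equal INITIAL_COMMITMENT_NUMBER - 1. Any further
	// commitment exchange decrements them again, and the check above (correctly) stops matching.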
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		(self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		(self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		(self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
			assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
			true
		} else { false }
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}
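		// Confirmation-count arithmetic, for reference: a transaction confirmed *in* the block
		// at funding_tx_confirmation_height already has one confirmation at that height, so at
		// height 102 a funding transaction confirmed at height 100 has 102 - 100 + 1 = 3
		// confirmations.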
		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
			self.context.channel_state |= ChannelState::OurChannelReady as u32;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state);
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							};
						}
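						// For reference, the short channel id computed above packs its parts as
						// (block_height << 40) | (tx_index_in_block << 16) | output_index, which
						// is why scid_from_parts rejects heights or transaction indices of
						// 2^24 (~16.7 million) or more and output indices above 2^16 - 1.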
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}
	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});
		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
			(non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
			assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
			// We use the time field to bump the current time we set on channel updates if it's
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}
	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
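	// Per BOLT 7, node_id_1/bitcoin_key_1 belong to whichever node has the lexicographically
	// smaller serialized public key. For example, a node whose compressed pubkey begins with
	// 0x02... sorts before a peer whose pubkey begins with 0x03..., and both peers must order
	// the fields identically for the four signatures on the announcement to validate.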
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0; 32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
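	// For intuition on the conversion in get_channel_reestablish above:
	// INITIAL_COMMITMENT_NUMBER is 2^48 - 1 and the tracked numbers count *down* as commitments
	// are exchanged. If cur_holder_commitment_transaction_number is 2^48 - 4, three
	// commitment_signed exchanges have completed, so
	// next_local_commitment_number = (2^48 - 1) - (2^48 - 4) = 3.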
	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}
	/// Adds a pending outbound HTLC to this channel; note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, since you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   in flight.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen; being !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point: None,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}],
		};
		self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
		monitor_update
	}
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
	///
	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
			onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
		match send_res? {
			Some(_) => {
				let monitor_update = self.build_commitment_no_status_check(logger);
				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
				Ok(self.push_ret_blockable_mon_update(monitor_update))
			},
			None => Ok(None)
		}
	}
	/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
	/// happened.
	pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
		let new_forwarding_info = Some(CounterpartyForwardingInfo {
			fee_base_msat: msg.contents.fee_base_msat,
			fee_proportional_millionths: msg.contents.fee_proportional_millionths,
			cltv_expiry_delta: msg.contents.cltv_expiry_delta
		});
		let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
		if did_change {
			self.context.counterparty_forwarding_info = new_forwarding_info;
		}

		Ok(did_change)
	}
5812 /// Begins the shutdown process, getting a message for the remote peer and returning all
5813 /// holding cell HTLCs for payment failure.
5815 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5816 /// [`ChannelMonitorUpdate`] will be returned).
5817 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5818 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5819 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
5821 for htlc in self.context.pending_outbound_htlcs.iter() {
5822 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5823 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5826 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5827 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5828 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5830 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5831 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5834 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5835 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5837 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5838 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5839 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5842 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5843 // script is set; we just force-close and call it a day.
5844 let mut chan_closed = false;
5845 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5849 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5851 None if !chan_closed => {
5852 // use override shutdown script if provided
5853 let shutdown_scriptpubkey = match override_shutdown_script {
5854 Some(script) => script,
5856 // otherwise, use the shutdown scriptpubkey provided by the signer
5857 match signer_provider.get_shutdown_scriptpubkey() {
5858 Ok(scriptpubkey) => scriptpubkey,
5859 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5863 if !shutdown_scriptpubkey.is_compatible(their_features) {
5864 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5866 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5872 // From here on out, we may not fail!
5873 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
5874 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5875 let shutdown_result = ShutdownResult {
5876 monitor_update: None,
5877 dropped_outbound_htlcs: Vec::new(),
5878 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5880 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5881 Some(shutdown_result)
5883 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5886 self.context.update_time_counter += 1;
5888 let monitor_update = if update_shutdown_script {
5889 self.context.latest_monitor_update_id += 1;
5890 let monitor_update = ChannelMonitorUpdate {
5891 update_id: self.context.latest_monitor_update_id,
5892 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5893 scriptpubkey: self.get_closing_scriptpubkey(),
5896 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5897 self.push_ret_blockable_mon_update(monitor_update)
5899 let shutdown = msgs::Shutdown {
5900 channel_id: self.context.channel_id,
5901 scriptpubkey: self.get_closing_scriptpubkey(),
5904 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5905 // our shutdown until we've committed all of the pending changes.
5906 self.context.holding_cell_update_fee = None;
5907 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5908 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5910 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5911 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5918 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5919 "we can't both complete shutdown and return a monitor update");
5921 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
5924 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5925 self.context.holding_cell_htlc_updates.iter()
5926 .flat_map(|htlc_update| {
5928 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5929 => Some((source, payment_hash)),
5933 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5937 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5938 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
5939 pub context: ChannelContext<SP>,
5940 pub unfunded_context: UnfundedChannelContext,
5943 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5944 pub fn new<ES: Deref, F: Deref>(
5945 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5946 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5947 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
5948 ) -> Result<OutboundV1Channel<SP>, APIError>
5949 where ES::Target: EntropySource,
5950 F::Target: FeeEstimator
5952 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5953 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5954 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5955 let pubkeys = holder_signer.pubkeys().clone();
5957 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5958 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5960 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5961 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5963 let channel_value_msat = channel_value_satoshis * 1000;
5964 if push_msat > channel_value_msat {
5965 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5967 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5968 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
5970 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5971 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5972 // This is a protocol-level safety check; it should never trigger because
5973 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5974 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5977 let channel_type = Self::get_initial_channel_type(&config, their_features);
5978 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5980 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5981 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5983 (ConfirmationTarget::NonAnchorChannelFee, 0)
5985 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5987 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5988 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5989 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5990 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5993 let mut secp_ctx = Secp256k1::new();
5994 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5996 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5997 match signer_provider.get_shutdown_scriptpubkey() {
5998 Ok(scriptpubkey) => Some(scriptpubkey),
5999 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6003 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6004 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6005 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6009 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6010 Ok(script) => script,
6011 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6014 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6017 context: ChannelContext {
6020 config: LegacyChannelConfig {
6021 options: config.channel_config.clone(),
6022 announced_channel: config.channel_handshake_config.announced_channel,
6023 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6028 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6030 channel_id: temporary_channel_id,
6031 temporary_channel_id: Some(temporary_channel_id),
6032 channel_state: ChannelState::OurInitSent as u32,
6033 announcement_sigs_state: AnnouncementSigsState::NotSent,
6035 channel_value_satoshis,
6037 latest_monitor_update_id: 0,
6039 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6040 shutdown_scriptpubkey,
6043 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6044 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6047 pending_inbound_htlcs: Vec::new(),
6048 pending_outbound_htlcs: Vec::new(),
6049 holding_cell_htlc_updates: Vec::new(),
6050 pending_update_fee: None,
6051 holding_cell_update_fee: None,
6052 next_holder_htlc_id: 0,
6053 next_counterparty_htlc_id: 0,
6054 update_time_counter: 1,
6056 resend_order: RAACommitmentOrder::CommitmentFirst,
6058 monitor_pending_channel_ready: false,
6059 monitor_pending_revoke_and_ack: false,
6060 monitor_pending_commitment_signed: false,
6061 monitor_pending_forwards: Vec::new(),
6062 monitor_pending_failures: Vec::new(),
6063 monitor_pending_finalized_fulfills: Vec::new(),
6065 signer_pending_commitment_update: false,
6066 signer_pending_funding: false,
6068 #[cfg(debug_assertions)]
6069 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6070 #[cfg(debug_assertions)]
6071 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6073 last_sent_closing_fee: None,
6074 pending_counterparty_closing_signed: None,
6075 expecting_peer_commitment_signed: false,
6076 closing_fee_limits: None,
6077 target_closing_feerate_sats_per_kw: None,
6079 funding_tx_confirmed_in: None,
6080 funding_tx_confirmation_height: 0,
6081 short_channel_id: None,
6082 channel_creation_height: current_chain_height,
6084 feerate_per_kw: commitment_feerate,
6085 counterparty_dust_limit_satoshis: 0,
6086 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6087 counterparty_max_htlc_value_in_flight_msat: 0,
6088 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6089 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6090 holder_selected_channel_reserve_satoshis,
6091 counterparty_htlc_minimum_msat: 0,
6092 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6093 counterparty_max_accepted_htlcs: 0,
6094 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6095 minimum_depth: None, // Filled in in accept_channel
6097 counterparty_forwarding_info: None,
6099 channel_transaction_parameters: ChannelTransactionParameters {
6100 holder_pubkeys: pubkeys,
6101 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6102 is_outbound_from_holder: true,
6103 counterparty_parameters: None,
6104 funding_outpoint: None,
6105 channel_type_features: channel_type.clone()
6107 funding_transaction: None,
6108 is_batch_funding: None,
6110 counterparty_cur_commitment_point: None,
6111 counterparty_prev_commitment_point: None,
6112 counterparty_node_id,
6114 counterparty_shutdown_scriptpubkey: None,
6116 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6118 channel_update_status: ChannelUpdateStatus::Enabled,
6119 closing_signed_in_flight: false,
6121 announcement_sigs: None,
6123 #[cfg(any(test, fuzzing))]
6124 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6125 #[cfg(any(test, fuzzing))]
6126 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6128 workaround_lnd_bug_4006: None,
6129 sent_message_awaiting_response: None,
6131 latest_inbound_scid_alias: None,
6132 outbound_scid_alias,
6134 channel_pending_event_emitted: false,
6135 channel_ready_event_emitted: false,
6137 #[cfg(any(test, fuzzing))]
6138 historical_inbound_htlc_fulfills: HashSet::new(),
6143 blocked_monitor_updates: Vec::new(),
6145 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6149 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6150 /// a funding_created message for the remote peer.
6151 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6152 /// or if called on an inbound channel.
6153 /// Note that channel_id changes during this call!
6154 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6155 /// If an Err is returned, it is a ChannelError::Close.
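/// A funding-flow sketch (hypothetical bindings; remember: do not broadcast the funding
/// transaction until `funding_signed` has been received and validated):
///
/// ```ignore
/// let (channel, funding_created_opt) = outbound_chan
///     .get_funding_created(funding_tx, funding_txo, false /* is_batch_funding */, &logger)
///     .map_err(|(_chan, err)| err)?;
/// // Send `funding_created_opt` to the peer if `Some`; `None` means we are waiting on
/// // an asynchronous signer and `signer_pending_funding` has been set.
/// ```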
6156 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6157 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6158 if !self.context.is_outbound() {
6159 panic!("Tried to create outbound funding_created message on an inbound channel!");
6161 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6162 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6164 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6165 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6166 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6167 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6170 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6171 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6173 // Now that we're past error-generating stuff, update our local state:
6175 self.context.channel_state = ChannelState::FundingCreated as u32;
6176 self.context.channel_id = funding_txo.to_channel_id();
6178 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6179 // We can skip this if it is a zero-conf channel.
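// (Coinbase outputs are consensus-locked for 100 blocks, which is what COINBASE_MATURITY encodes.)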
6180 if funding_transaction.is_coin_base() &&
6181 self.context.minimum_depth.unwrap_or(0) > 0 &&
6182 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6183 self.context.minimum_depth = Some(COINBASE_MATURITY);
6186 self.context.funding_transaction = Some(funding_transaction);
6187 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6189 let funding_created = self.context.get_funding_created_msg(logger);
6190 if funding_created.is_none() {
6191 if !self.context.signer_pending_funding {
6192 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6193 self.context.signer_pending_funding = true;
6197 let channel = Channel {
6198 context: self.context,
6201 Ok((channel, funding_created))
6204 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6205 // The default channel type (i.e. the first one we try) depends on whether the channel is
6206 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6207 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6208 // with no other changes, and fall back to `only_static_remotekey`.
6209 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6210 if !config.channel_handshake_config.announced_channel &&
6211 config.channel_handshake_config.negotiate_scid_privacy &&
6212 their_features.supports_scid_privacy() {
6213 ret.set_scid_privacy_required();
6216 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6217 // set it now. If they don't understand it, we'll fall back to our default of
6218 // `only_static_remotekey`.
6219 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6220 their_features.supports_anchors_zero_fee_htlc_tx() {
6221 ret.set_anchors_zero_fee_htlc_tx_required();
6227 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6228 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6229 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
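///
/// A retry sketch (hypothetical bindings):
///
/// ```ignore
/// // On receiving an `error` message for a not-yet-accepted outbound channel:
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(open_channel_msg) => { /* re-send the downgraded open_channel */ },
///     Err(()) => { /* no fallback left, fail the channel */ },
/// }
/// ```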
6230 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6231 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6232 ) -> Result<msgs::OpenChannel, ()>
6234 F::Target: FeeEstimator
6236 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6237 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6238 // We've exhausted our options
6241 // We support opening a few different types of channels. Try removing our additional
6242 // features one by one until we've either arrived at our default or the counterparty has accepted one.
6245 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6246 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6247 // checks whether the counterparty supports every feature, this would only happen if the
6248 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
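// The resulting downgrade order is therefore:
//   anchors_zero_fee_htlc_tx -> scid_privacy -> only_static_remote_key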
6250 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6251 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6252 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6253 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6254 } else if self.context.channel_type.supports_scid_privacy() {
6255 self.context.channel_type.clear_scid_privacy();
6257 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6259 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6260 Ok(self.get_open_channel(chain_hash))
6263 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6264 if !self.context.is_outbound() {
6265 panic!("Tried to open a channel for an inbound channel?");
6267 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6268 panic!("Cannot generate an open_channel after we've moved forward");
6271 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6272 panic!("Tried to send an open_channel for a channel that has already advanced");
6275 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6276 let keys = self.context.get_holder_pubkeys();
6280 temporary_channel_id: self.context.channel_id,
6281 funding_satoshis: self.context.channel_value_satoshis,
6282 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6283 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6284 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6285 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6286 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6287 feerate_per_kw: self.context.feerate_per_kw as u32,
6288 to_self_delay: self.context.get_holder_selected_contest_delay(),
6289 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6290 funding_pubkey: keys.funding_pubkey,
6291 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6292 payment_point: keys.payment_point,
6293 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6294 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6295 first_per_commitment_point,
6296 channel_flags: if self.context.config.announced_channel {1} else {0},
6297 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6298 Some(script) => script.clone().into_inner(),
6299 None => Builder::new().into_script(),
6301 channel_type: Some(self.context.channel_type.clone()),
6306 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6307 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6309 // Check sanity of message fields:
6310 if !self.context.is_outbound() {
6311 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6313 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6314 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6316 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6317 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6319 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6320 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6322 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6323 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6325 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6326 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6327 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6329 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6330 if msg.htlc_minimum_msat >= full_channel_value_msat {
6331 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6333 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6334 if msg.to_self_delay > max_delay_acceptable {
6335 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6337 if msg.max_accepted_htlcs < 1 {
6338 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6340 if msg.max_accepted_htlcs > MAX_HTLCS {
6341 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6344 // Now check against optional parameters as set by config...
6345 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6346 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6348 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6349 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6351 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6352 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6354 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6355 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6357 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6358 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6360 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6361 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6363 if msg.minimum_depth > peer_limits.max_minimum_depth {
6364 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6367 if let Some(ty) = &msg.channel_type {
6368 if *ty != self.context.channel_type {
6369 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6371 } else if their_features.supports_channel_type() {
6372 // Assume they've accepted the channel type as they said they understand it.
6374 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6375 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6376 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6378 self.context.channel_type = channel_type.clone();
6379 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6382 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6383 match &msg.shutdown_scriptpubkey {
6384 &Some(ref script) => {
6385 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6386 if script.is_empty() {
6389 if !script::is_bolt2_compliant(&script, their_features) {
6390 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6392 Some(script.clone())
6395 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
6397 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive any script. Use a 0-length script to opt out".to_owned()));
6402 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6403 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6404 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6405 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6406 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6408 if peer_limits.trust_own_funding_0conf {
6409 self.context.minimum_depth = Some(msg.minimum_depth);
6411 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6414 let counterparty_pubkeys = ChannelPublicKeys {
6415 funding_pubkey: msg.funding_pubkey,
6416 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6417 payment_point: msg.payment_point,
6418 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6419 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6422 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6423 selected_contest_delay: msg.to_self_delay,
6424 pubkeys: counterparty_pubkeys,
6427 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6428 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6430 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6431 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6437 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6438 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6439 pub context: ChannelContext<SP>,
6440 pub unfunded_context: UnfundedChannelContext,
6443 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6444 /// Creates a new channel from a remote side's request for one.
6445 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6446 pub fn new<ES: Deref, F: Deref, L: Deref>(
6447 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6448 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6449 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6450 current_chain_height: u32, logger: &L, is_0conf: bool,
6451 ) -> Result<InboundV1Channel<SP>, ChannelError>
6452 where ES::Target: EntropySource,
6453 F::Target: FeeEstimator,
6456 let announced_channel = (msg.channel_flags & 1) == 1;
6458 // First check the channel type is known, failing before we do anything else if we don't
6459 // support this channel type.
6460 let channel_type = if let Some(channel_type) = &msg.channel_type {
6461 if channel_type.supports_any_optional_bits() {
6462 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6465 // We only support the channel types defined by the `ChannelManager` in
6466 // `provided_channel_type_features`. The channel type must always support
6467 // `static_remote_key`.
6468 if !channel_type.requires_static_remote_key() {
6469 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6471 // Make sure we support all of the features behind the channel type.
6472 if !channel_type.is_subset(our_supported_features) {
6473 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6475 if channel_type.requires_scid_privacy() && announced_channel {
6476 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6478 channel_type.clone()
6480 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6481 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6482 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6487 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6488 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6489 let pubkeys = holder_signer.pubkeys().clone();
6490 let counterparty_pubkeys = ChannelPublicKeys {
6491 funding_pubkey: msg.funding_pubkey,
6492 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6493 payment_point: msg.payment_point,
6494 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6495 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6498 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6499 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6502 // Check sanity of message fields:
6503 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6504 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6506 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6507 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6509 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6510 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6512 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6513 if msg.push_msat > full_channel_value_msat {
6514 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6516 if msg.dust_limit_satoshis > msg.funding_satoshis {
6517 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6519 if msg.htlc_minimum_msat >= full_channel_value_msat {
6520 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6522 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6524 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6525 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6526 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6528 if msg.max_accepted_htlcs < 1 {
6529 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6531 if msg.max_accepted_htlcs > MAX_HTLCS {
6532 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6535 // Now check against optional parameters as set by config...
6536 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6537 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6539 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6540 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6542 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6543 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6545 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6546 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6548 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6549 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6551 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6552 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6554 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6555 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6558 // Convert things into internal flags and prep our state:
6560 if config.channel_handshake_limits.force_announced_channel_preference {
6561 if config.channel_handshake_config.announced_channel != announced_channel {
6562 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6566 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6567 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6568 // This is a protocol-level safety check; it should never trigger because
6569 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6570 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6572 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6573 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6575 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6576 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6577 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6579 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6580 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6583 // check if the funder's amount for the initial commitment tx is sufficient
6584 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6585 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6586 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6590 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6591 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6592 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6593 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6596 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6597 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6598 // want to push much to us), our counterparty should always have more than our reserve.
6599 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6600 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6603 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6604 match &msg.shutdown_scriptpubkey {
6605 &Some(ref script) => {
6606 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6607 if script.is_empty() {
6610 if !script::is_bolt2_compliant(&script, their_features) {
6611 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6613 Some(script.clone())
6616 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
6618 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive any script. Use a 0-length script to opt out".to_owned()));
6623 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6624 match signer_provider.get_shutdown_scriptpubkey() {
6625 Ok(scriptpubkey) => Some(scriptpubkey),
6626 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6630 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6631 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6632 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6636 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6637 Ok(script) => script,
6638 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6641 let mut secp_ctx = Secp256k1::new();
6642 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6644 let minimum_depth = if is_0conf {
6647 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6651 context: ChannelContext {
6654 config: LegacyChannelConfig {
6655 options: config.channel_config.clone(),
6657 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6662 inbound_handshake_limits_override: None,
6664 temporary_channel_id: Some(msg.temporary_channel_id),
6665 channel_id: msg.temporary_channel_id,
6666 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6667 announcement_sigs_state: AnnouncementSigsState::NotSent,
6670 latest_monitor_update_id: 0,
6672 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6673 shutdown_scriptpubkey,
6676 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6677 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6678 value_to_self_msat: msg.push_msat,
6680 pending_inbound_htlcs: Vec::new(),
6681 pending_outbound_htlcs: Vec::new(),
6682 holding_cell_htlc_updates: Vec::new(),
6683 pending_update_fee: None,
6684 holding_cell_update_fee: None,
6685 next_holder_htlc_id: 0,
6686 next_counterparty_htlc_id: 0,
6687 update_time_counter: 1,
6689 resend_order: RAACommitmentOrder::CommitmentFirst,
6691 monitor_pending_channel_ready: false,
6692 monitor_pending_revoke_and_ack: false,
6693 monitor_pending_commitment_signed: false,
6694 monitor_pending_forwards: Vec::new(),
6695 monitor_pending_failures: Vec::new(),
6696 monitor_pending_finalized_fulfills: Vec::new(),
6698 signer_pending_commitment_update: false,
6699 signer_pending_funding: false,
6701 #[cfg(debug_assertions)]
6702 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6703 #[cfg(debug_assertions)]
6704 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6706 last_sent_closing_fee: None,
6707 pending_counterparty_closing_signed: None,
6708 expecting_peer_commitment_signed: false,
6709 closing_fee_limits: None,
6710 target_closing_feerate_sats_per_kw: None,
6712 funding_tx_confirmed_in: None,
6713 funding_tx_confirmation_height: 0,
6714 short_channel_id: None,
6715 channel_creation_height: current_chain_height,
6717 feerate_per_kw: msg.feerate_per_kw,
6718 channel_value_satoshis: msg.funding_satoshis,
6719 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6720 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6721 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6722 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6723 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6724 holder_selected_channel_reserve_satoshis,
6725 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6726 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6727 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6728 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6731 counterparty_forwarding_info: None,
6733 channel_transaction_parameters: ChannelTransactionParameters {
6734 holder_pubkeys: pubkeys,
6735 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6736 is_outbound_from_holder: false,
6737 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6738 selected_contest_delay: msg.to_self_delay,
6739 pubkeys: counterparty_pubkeys,
6741 funding_outpoint: None,
6742 channel_type_features: channel_type.clone()
6744 funding_transaction: None,
6745 is_batch_funding: None,
6747 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6748 counterparty_prev_commitment_point: None,
6749 counterparty_node_id,
6751 counterparty_shutdown_scriptpubkey,
6753 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6755 channel_update_status: ChannelUpdateStatus::Enabled,
6756 closing_signed_in_flight: false,
6758 announcement_sigs: None,
6760 #[cfg(any(test, fuzzing))]
6761 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6762 #[cfg(any(test, fuzzing))]
6763 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6765 workaround_lnd_bug_4006: None,
6766 sent_message_awaiting_response: None,
6768 latest_inbound_scid_alias: None,
6769 outbound_scid_alias: 0,
6771 channel_pending_event_emitted: false,
6772 channel_ready_event_emitted: false,
6774 #[cfg(any(test, fuzzing))]
6775 historical_inbound_htlc_fulfills: HashSet::new(),
6780 blocked_monitor_updates: Vec::new(),
6782 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6788 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6789 /// should be sent back to the counterparty node.
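///
/// A sketch (hypothetical `chan` of type `InboundV1Channel`):
///
/// ```ignore
/// let accept_msg = chan.accept_inbound_channel();
/// // Send `accept_msg` back to the counterparty.
/// ```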
6791 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6792 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6793 if self.context.is_outbound() {
6794 panic!("Tried to send accept_channel for an outbound channel?");
6796 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6797 panic!("Tried to send accept_channel after channel had moved forward");
6799 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6800 panic!("Tried to send an accept_channel for a channel that has already advanced");
6803 self.generate_accept_channel_message()
6806 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6807 /// inbound channel. If the intention is to accept an inbound channel, use
6808 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6810 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6811 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6812 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6813 let keys = self.context.get_holder_pubkeys();
6815 msgs::AcceptChannel {
6816 temporary_channel_id: self.context.channel_id,
6817 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6818 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6819 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6820 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6821 minimum_depth: self.context.minimum_depth.unwrap(),
6822 to_self_delay: self.context.get_holder_selected_contest_delay(),
6823 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6824 funding_pubkey: keys.funding_pubkey,
6825 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6826 payment_point: keys.payment_point,
6827 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6828 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6829 first_per_commitment_point,
6830 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6831 Some(script) => script.clone().into_inner(),
6832 None => Builder::new().into_script(),
6834 channel_type: Some(self.context.channel_type.clone()),
6836 next_local_nonce: None,
6840 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6841 /// inbound channel without accepting it.
6843 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6845 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6846 self.generate_accept_channel_message()
6849 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6850 let funding_script = self.context.get_funding_redeemscript();
6852 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6853 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6854 let trusted_tx = initial_commitment_tx.trust();
6855 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6856 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6857 // They sign the holder commitment transaction...
6858 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6859 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6860 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6861 encode::serialize_hex(&funding_script), &self.context.channel_id());
6862 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6864 Ok(initial_commitment_tx)
6867 pub fn funding_created<L: Deref>(
6868 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6869 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
6873 if self.context.is_outbound() {
6874 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6876 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6877 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6878 // remember the channel, so it's safe to just send an error_message here and drop the channel.
6880 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6882 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6883 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6884 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6885 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6888 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6889 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6890 // This is an externally observable change before we finish all our checks. In particular
6891 // check_funding_created_signature may fail.
6892 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6894 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
6896 Err(ChannelError::Close(e)) => {
6897 self.context.channel_transaction_parameters.funding_outpoint = None;
6898 return Err((self, ChannelError::Close(e)));
6901 // The only error we know how to handle is ChannelError::Close, so we fall over here
6902 // to make sure we don't continue with an inconsistent state.
6903 panic!("unexpected error type from check_funding_created_signature {:?}", e);
6907 let holder_commitment_tx = HolderCommitmentTransaction::new(
6908 initial_commitment_tx,
6911 &self.context.get_holder_pubkeys().funding_pubkey,
6912 self.context.counterparty_funding_pubkey()
6915 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6916 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6919 // Now that we're past error-generating stuff, update our local state:
6921 self.context.channel_state = ChannelState::FundingSent as u32;
6922 self.context.channel_id = funding_txo.to_channel_id();
6923 self.context.cur_counterparty_commitment_transaction_number -= 1;
6924 self.context.cur_holder_commitment_transaction_number -= 1;
6926 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6928 let funding_redeemscript = self.context.get_funding_redeemscript();
6929 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6930 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6931 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6932 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6933 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6934 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6935 shutdown_script, self.context.get_holder_selected_contest_delay(),
6936 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6937 &self.context.channel_transaction_parameters,
6938 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6940 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6942 channel_monitor.provide_initial_counterparty_commitment_tx(
6943 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6944 self.context.cur_counterparty_commitment_transaction_number + 1,
6945 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6946 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6947 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6949 log_info!(logger, "{} funding_signed for peer for channel {}",
6950 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
6952 // Promote the channel to a full-fledged one now that we have updated the state and have a
6953 // `ChannelMonitor`.
6954 let mut channel = Channel {
6955 context: self.context,
6957 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6958 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6960 Ok((channel, funding_signed, channel_monitor))
6964 const SERIALIZATION_VERSION: u8 = 3;
6965 const MIN_SERIALIZATION_VERSION: u8 = 3;
6967 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6973 impl Writeable for ChannelUpdateStatus {
6974 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6975 // We only care about writing out the current state as it was announced, ie only either
6976 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6977 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6979 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6980 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6981 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6982 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6988 impl Readable for ChannelUpdateStatus {
6989 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6990 Ok(match <u8 as Readable>::read(reader)? {
6991 0 => ChannelUpdateStatus::Enabled,
6992 1 => ChannelUpdateStatus::Disabled,
6993 _ => return Err(DecodeError::InvalidValue),
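// Note the deliberate asymmetry with the `Writeable` impl above: a stored 0
// always reads back as `Enabled` and a stored 1 as `Disabled`, i.e. any
// staged-but-unannounced status change is reset to the last announced state
// on reload.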
6998 impl Writeable for AnnouncementSigsState {
6999 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7000 // We only care about writing out the current state as if we had just disconnected, at
7001 // which point we always set anything but PeerReceived to NotSent.
7003 AnnouncementSigsState::NotSent => 0u8.write(writer),
7004 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7005 AnnouncementSigsState::Committed => 0u8.write(writer),
7006 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7011 impl Readable for AnnouncementSigsState {
7012 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7013 Ok(match <u8 as Readable>::read(reader)? {
7014 0 => AnnouncementSigsState::NotSent,
7015 1 => AnnouncementSigsState::PeerReceived,
7016 _ => return Err(DecodeError::InvalidValue),
7021 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7022 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7023 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7026 write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7028 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7029 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7030 // the low bytes now and the optional high bytes later.
7031 let user_id_low = self.context.user_id as u64;
7032 user_id_low.write(writer)?;
7034 // Version 1 deserializers expected to read parts of the config object here. Version 2
7035 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7036 // `minimum_depth` we simply write dummy values here.
7037 writer.write_all(&[0; 8])?;
7039 self.context.channel_id.write(writer)?;
7040 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7041 self.context.channel_value_satoshis.write(writer)?;
7043 self.context.latest_monitor_update_id.write(writer)?;
7045 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7046 // deserialized from that format.
7047 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7048 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7049 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7051 self.context.destination_script.write(writer)?;
7053 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7054 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7055 self.context.value_to_self_msat.write(writer)?;
7057 let mut dropped_inbound_htlcs = 0;
7058 for htlc in self.context.pending_inbound_htlcs.iter() {
7059 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7060 dropped_inbound_htlcs += 1;
7063 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7064 for htlc in self.context.pending_inbound_htlcs.iter() {
7065 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7068 htlc.htlc_id.write(writer)?;
7069 htlc.amount_msat.write(writer)?;
7070 htlc.cltv_expiry.write(writer)?;
7071 htlc.payment_hash.write(writer)?;
7073 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7074 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7076 htlc_state.write(writer)?;
7078 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7080 htlc_state.write(writer)?;
7082 &InboundHTLCState::Committed => {
7085 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7087 removal_reason.write(writer)?;
7092 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7093 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7095 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7096 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7097 htlc.htlc_id.write(writer)?;
7098 htlc.amount_msat.write(writer)?;
7099 htlc.cltv_expiry.write(writer)?;
7100 htlc.payment_hash.write(writer)?;
7101 htlc.source.write(writer)?;
7103 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7105 onion_packet.write(writer)?;
7107 &OutboundHTLCState::Committed => {
7110 &OutboundHTLCState::RemoteRemoved(_) => {
7111 // Treat this as a Committed because we haven't received the CS - they'll
7112 // resend the claim/fail on reconnect as well as (hopefully) the missing CS.
7115 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7117 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7118 preimages.push(preimage);
7120 let reason: Option<&HTLCFailReason> = outcome.into();
7121 reason.write(writer)?;
7123 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7125 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7126 preimages.push(preimage);
7128 let reason: Option<&HTLCFailReason> = outcome.into();
7129 reason.write(writer)?;
7132 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7133 if pending_outbound_skimmed_fees.is_empty() {
7134 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7136 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7137 } else if !pending_outbound_skimmed_fees.is_empty() {
7138 pending_outbound_skimmed_fees.push(None);
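// The list built above is lazily materialized: it stays empty until the
// first `Some` skimmed fee is seen, then is back-filled with `None`s so its
// indices line up with `pending_outbound_htlcs`. An all-`None` list thus
// stays empty and the `optional_vec` TLV below omits it entirely. The
// holding-cell list below follows the same scheme.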
7142 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7143 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7144 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7146 &HTLCUpdateAwaitingACK::AddHTLC {
7147 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7151 amount_msat.write(writer)?;
7152 cltv_expiry.write(writer)?;
7153 payment_hash.write(writer)?;
7154 source.write(writer)?;
7155 onion_routing_packet.write(writer)?;
7157 if let Some(skimmed_fee) = skimmed_fee_msat {
7158 if holding_cell_skimmed_fees.is_empty() {
7159 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7161 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7162 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7164 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7166 payment_preimage.write(writer)?;
7167 htlc_id.write(writer)?;
7169 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7171 htlc_id.write(writer)?;
7172 err_packet.write(writer)?;
7177 match self.context.resend_order {
7178 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7179 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7182 self.context.monitor_pending_channel_ready.write(writer)?;
7183 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7184 self.context.monitor_pending_commitment_signed.write(writer)?;
7186 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7187 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7188 pending_forward.write(writer)?;
7189 htlc_id.write(writer)?;
7192 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7193 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7194 htlc_source.write(writer)?;
7195 payment_hash.write(writer)?;
7196 fail_reason.write(writer)?;
7199 if self.context.is_outbound() {
7200 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7201 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7202 Some(feerate).write(writer)?;
7204 // As for inbound HTLCs, if the update was only announced and never committed in a
7205 // commitment_signed, drop it.
7206 None::<u32>.write(writer)?;
7208 self.context.holding_cell_update_fee.write(writer)?;
7210 self.context.next_holder_htlc_id.write(writer)?;
7211 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7212 self.context.update_time_counter.write(writer)?;
7213 self.context.feerate_per_kw.write(writer)?;
7215 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7216 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7217 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7218 // consider the stale state on reload.
7221 self.context.funding_tx_confirmed_in.write(writer)?;
7222 self.context.funding_tx_confirmation_height.write(writer)?;
7223 self.context.short_channel_id.write(writer)?;
7225 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7226 self.context.holder_dust_limit_satoshis.write(writer)?;
7227 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7229 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7230 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7232 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7233 self.context.holder_htlc_minimum_msat.write(writer)?;
7234 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7236 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7237 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7239 match &self.context.counterparty_forwarding_info {
7242 info.fee_base_msat.write(writer)?;
7243 info.fee_proportional_millionths.write(writer)?;
7244 info.cltv_expiry_delta.write(writer)?;
7246 None => 0u8.write(writer)?
7249 self.context.channel_transaction_parameters.write(writer)?;
7250 self.context.funding_transaction.write(writer)?;
7252 self.context.counterparty_cur_commitment_point.write(writer)?;
7253 self.context.counterparty_prev_commitment_point.write(writer)?;
7254 self.context.counterparty_node_id.write(writer)?;
7256 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7258 self.context.commitment_secrets.write(writer)?;
7260 self.context.channel_update_status.write(writer)?;
7262 #[cfg(any(test, fuzzing))]
7263 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7264 #[cfg(any(test, fuzzing))]
7265 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7266 htlc.write(writer)?;
7269 // If the channel type is something other than only-static-remote-key, then we need to have
7270 // older clients fail to deserialize this channel at all. If the type is
7271 // only-static-remote-key, we simply consider it "default" and don't write the channel type out at all.
7273 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7274 Some(&self.context.channel_type) } else { None };
7276 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7277 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
7278 // different percentage of the channel value than 10%, which older versions of LDK
7279 // always used before the percentage was made configurable.
7280 let serialized_holder_selected_reserve =
7281 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7282 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7284 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7285 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7286 let serialized_holder_htlc_max_in_flight =
7287 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7288 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7290 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7291 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7293 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7294 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7295 // we write the high bytes as an option here.
7296 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
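// For example (illustrative only):
//   let user_id: u128 = (7u128 << 64) | 42;
//   assert_eq!(user_id as u64, 42);        // low half, written above
//   assert_eq!((user_id >> 64) as u64, 7); // high half, TLV stream below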
7298 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
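// As TLV type 28 is even, older readers reject any channel that includes
// it; eliding the value when it equals DEFAULT_MAX_HTLCS keeps
// default-configured channels readable by those older versions.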
7300 write_tlv_fields!(writer, {
7301 (0, self.context.announcement_sigs, option),
7302 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7303 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7304 // them twice, once with their original default values above, and once as an option
7305 // here. On the read side, old versions will simply ignore the odd-type entries here,
7306 // and new versions map the default values to None and allow the TLV entries here to override them.
7308 (1, self.context.minimum_depth, option),
7309 (2, chan_type, option),
7310 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7311 (4, serialized_holder_selected_reserve, option),
7312 (5, self.context.config, required),
7313 (6, serialized_holder_htlc_max_in_flight, option),
7314 (7, self.context.shutdown_scriptpubkey, option),
7315 (8, self.context.blocked_monitor_updates, optional_vec),
7316 (9, self.context.target_closing_feerate_sats_per_kw, option),
7317 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7318 (13, self.context.channel_creation_height, required),
7319 (15, preimages, required_vec),
7320 (17, self.context.announcement_sigs_state, required),
7321 (19, self.context.latest_inbound_scid_alias, option),
7322 (21, self.context.outbound_scid_alias, required),
7323 (23, channel_ready_event_emitted, option),
7324 (25, user_id_high_opt, option),
7325 (27, self.context.channel_keys_id, required),
7326 (28, holder_max_accepted_htlcs, option),
7327 (29, self.context.temporary_channel_id, option),
7328 (31, channel_pending_event_emitted, option),
7329 (35, pending_outbound_skimmed_fees, optional_vec),
7330 (37, holding_cell_skimmed_fees, optional_vec),
7331 (38, self.context.is_batch_funding, option),
7338 const MAX_ALLOC_SIZE: usize = 64*1024;
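// MAX_ALLOC_SIZE bounds any up-front `Vec` allocation driven by an
// untrusted length prefix during deserialization; longer payloads must
// prove their length by actually providing the bytes (see the 1KB-chunked
// signer-bytes read below).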
7339 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7341 ES::Target: EntropySource,
7342 SP::Target: SignerProvider
7344 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7345 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7346 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7348 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7349 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7350 // the low bytes now and the high bytes later.
7351 let user_id_low: u64 = Readable::read(reader)?;
7353 let mut config = Some(LegacyChannelConfig::default());
7355 // Read the old serialization of the ChannelConfig from version 0.0.98.
7356 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7357 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7358 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7359 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7361 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7362 let mut _val: u64 = Readable::read(reader)?;
7365 let channel_id = Readable::read(reader)?;
7366 let channel_state = Readable::read(reader)?;
7367 let channel_value_satoshis = Readable::read(reader)?;
7369 let latest_monitor_update_id = Readable::read(reader)?;
7371 let mut keys_data = None;
7373 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7374 // the `channel_keys_id` TLV is present below.
7375 let keys_len: u32 = Readable::read(reader)?;
7376 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7377 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7378 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7379 let mut data = [0; 1024];
7380 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7381 reader.read_exact(read_slice)?;
7382 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7386 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7387 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7388 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7391 let destination_script = Readable::read(reader)?;
7393 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7394 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7395 let value_to_self_msat = Readable::read(reader)?;
7397 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7399 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7400 for _ in 0..pending_inbound_htlc_count {
7401 pending_inbound_htlcs.push(InboundHTLCOutput {
7402 htlc_id: Readable::read(reader)?,
7403 amount_msat: Readable::read(reader)?,
7404 cltv_expiry: Readable::read(reader)?,
7405 payment_hash: Readable::read(reader)?,
7406 state: match <u8 as Readable>::read(reader)? {
7407 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7408 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7409 3 => InboundHTLCState::Committed,
7410 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7411 _ => return Err(DecodeError::InvalidValue),
7416 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7417 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7418 for _ in 0..pending_outbound_htlc_count {
7419 pending_outbound_htlcs.push(OutboundHTLCOutput {
7420 htlc_id: Readable::read(reader)?,
7421 amount_msat: Readable::read(reader)?,
7422 cltv_expiry: Readable::read(reader)?,
7423 payment_hash: Readable::read(reader)?,
7424 source: Readable::read(reader)?,
7425 state: match <u8 as Readable>::read(reader)? {
7426 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7427 1 => OutboundHTLCState::Committed,
7429 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7430 OutboundHTLCState::RemoteRemoved(option.into())
7433 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7434 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7437 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7438 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7440 _ => return Err(DecodeError::InvalidValue),
7442 skimmed_fee_msat: None,
7446 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7447 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7448 for _ in 0..holding_cell_htlc_update_count {
7449 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7450 0 => HTLCUpdateAwaitingACK::AddHTLC {
7451 amount_msat: Readable::read(reader)?,
7452 cltv_expiry: Readable::read(reader)?,
7453 payment_hash: Readable::read(reader)?,
7454 source: Readable::read(reader)?,
7455 onion_routing_packet: Readable::read(reader)?,
7456 skimmed_fee_msat: None,
7458 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7459 payment_preimage: Readable::read(reader)?,
7460 htlc_id: Readable::read(reader)?,
7462 2 => HTLCUpdateAwaitingACK::FailHTLC {
7463 htlc_id: Readable::read(reader)?,
7464 err_packet: Readable::read(reader)?,
7466 _ => return Err(DecodeError::InvalidValue),
7470 let resend_order = match <u8 as Readable>::read(reader)? {
7471 0 => RAACommitmentOrder::CommitmentFirst,
7472 1 => RAACommitmentOrder::RevokeAndACKFirst,
7473 _ => return Err(DecodeError::InvalidValue),
7476 let monitor_pending_channel_ready = Readable::read(reader)?;
7477 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7478 let monitor_pending_commitment_signed = Readable::read(reader)?;
7480 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7481 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7482 for _ in 0..monitor_pending_forwards_count {
7483 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7486 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7487 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7488 for _ in 0..monitor_pending_failures_count {
7489 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7492 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7494 let holding_cell_update_fee = Readable::read(reader)?;
7496 let next_holder_htlc_id = Readable::read(reader)?;
7497 let next_counterparty_htlc_id = Readable::read(reader)?;
7498 let update_time_counter = Readable::read(reader)?;
7499 let feerate_per_kw = Readable::read(reader)?;
7501 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7502 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7503 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7504 // consider the stale state on reload.
7505 match <u8 as Readable>::read(reader)? {
7508 let _: u32 = Readable::read(reader)?;
7509 let _: u64 = Readable::read(reader)?;
7510 let _: Signature = Readable::read(reader)?;
7512 _ => return Err(DecodeError::InvalidValue),
7515 let funding_tx_confirmed_in = Readable::read(reader)?;
7516 let funding_tx_confirmation_height = Readable::read(reader)?;
7517 let short_channel_id = Readable::read(reader)?;
7519 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7520 let holder_dust_limit_satoshis = Readable::read(reader)?;
7521 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7522 let mut counterparty_selected_channel_reserve_satoshis = None;
7524 // Read the old serialization from version 0.0.98.
7525 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7527 // Read the 8 bytes of backwards-compatibility data.
7528 let _dummy: u64 = Readable::read(reader)?;
7530 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7531 let holder_htlc_minimum_msat = Readable::read(reader)?;
7532 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7534 let mut minimum_depth = None;
7536 // Read the old serialization from version 0.0.98.
7537 minimum_depth = Some(Readable::read(reader)?);
7539 // Read the 4 bytes of backwards-compatibility data.
7540 let _dummy: u32 = Readable::read(reader)?;
7543 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7545 1 => Some(CounterpartyForwardingInfo {
7546 fee_base_msat: Readable::read(reader)?,
7547 fee_proportional_millionths: Readable::read(reader)?,
7548 cltv_expiry_delta: Readable::read(reader)?,
7550 _ => return Err(DecodeError::InvalidValue),
7553 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7554 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7556 let counterparty_cur_commitment_point = Readable::read(reader)?;
7558 let counterparty_prev_commitment_point = Readable::read(reader)?;
7559 let counterparty_node_id = Readable::read(reader)?;
7561 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7562 let commitment_secrets = Readable::read(reader)?;
7564 let channel_update_status = Readable::read(reader)?;
7566 #[cfg(any(test, fuzzing))]
7567 let mut historical_inbound_htlc_fulfills = HashSet::new();
7568 #[cfg(any(test, fuzzing))]
7570 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7571 for _ in 0..htlc_fulfills_len {
7572 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7576 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7577 Some((feerate, if channel_parameters.is_outbound_from_holder {
7578 FeeUpdateState::Outbound
7580 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7586 let mut announcement_sigs = None;
7587 let mut target_closing_feerate_sats_per_kw = None;
7588 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7589 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7590 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7591 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7592 // only, so we default to that if none was written.
7593 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7594 let mut channel_creation_height = Some(serialized_height);
7595 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7597 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7598 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7599 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7600 let mut latest_inbound_scid_alias = None;
7601 let mut outbound_scid_alias = None;
7602 let mut channel_pending_event_emitted = None;
7603 let mut channel_ready_event_emitted = None;
7605 let mut user_id_high_opt: Option<u64> = None;
7606 let mut channel_keys_id: Option<[u8; 32]> = None;
7607 let mut temporary_channel_id: Option<ChannelId> = None;
7608 let mut holder_max_accepted_htlcs: Option<u16> = None;
7610 let mut blocked_monitor_updates = Some(Vec::new());
7612 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7613 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7615 let mut is_batch_funding: Option<()> = None;
7617 read_tlv_fields!(reader, {
7618 (0, announcement_sigs, option),
7619 (1, minimum_depth, option),
7620 (2, channel_type, option),
7621 (3, counterparty_selected_channel_reserve_satoshis, option),
7622 (4, holder_selected_channel_reserve_satoshis, option),
7623 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7624 (6, holder_max_htlc_value_in_flight_msat, option),
7625 (7, shutdown_scriptpubkey, option),
7626 (8, blocked_monitor_updates, optional_vec),
7627 (9, target_closing_feerate_sats_per_kw, option),
7628 (11, monitor_pending_finalized_fulfills, optional_vec),
7629 (13, channel_creation_height, option),
7630 (15, preimages_opt, optional_vec),
7631 (17, announcement_sigs_state, option),
7632 (19, latest_inbound_scid_alias, option),
7633 (21, outbound_scid_alias, option),
7634 (23, channel_ready_event_emitted, option),
7635 (25, user_id_high_opt, option),
7636 (27, channel_keys_id, option),
7637 (28, holder_max_accepted_htlcs, option),
7638 (29, temporary_channel_id, option),
7639 (31, channel_pending_event_emitted, option),
7640 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7641 (37, holding_cell_skimmed_fees_opt, optional_vec),
7642 (38, is_batch_funding, option),
7645 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7646 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7647 // If we've gotten to the funding stage of the channel, populate the signer with its
7648 // required channel parameters.
7649 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7650 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7651 holder_signer.provide_channel_parameters(&channel_parameters);
7653 (channel_keys_id, holder_signer)
7655 // `keys_data` can be `None` if we had corrupted data.
7656 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7657 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7658 (holder_signer.channel_keys_id(), holder_signer)
7661 if let Some(preimages) = preimages_opt {
7662 let mut iter = preimages.into_iter();
7663 for htlc in pending_outbound_htlcs.iter_mut() {
7665 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7666 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7668 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7669 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7674 // We expect all preimages to be consumed above
7675 if iter.next().is_some() {
7676 return Err(DecodeError::InvalidValue);
7680 let chan_features = channel_type.as_ref().unwrap();
7681 if !chan_features.is_subset(our_supported_features) {
7682 // If the channel was written by a new version and negotiated with features we don't
7683 // understand yet, refuse to read it.
7684 return Err(DecodeError::UnknownRequiredFeature);
7687 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7688 // To account for that, we're proactively setting/overriding the field here.
7689 channel_parameters.channel_type_features = chan_features.clone();
7691 let mut secp_ctx = Secp256k1::new();
7692 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7694 // `user_id` used to be a single u64 value. In order to remain backwards
7695 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7696 // separate u64 values.
7697 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7699 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7701 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7702 let mut iter = skimmed_fees.into_iter();
7703 for htlc in pending_outbound_htlcs.iter_mut() {
7704 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7706 // We expect all skimmed fees to be consumed above
7707 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7709 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7710 let mut iter = skimmed_fees.into_iter();
7711 for htlc in holding_cell_htlc_updates.iter_mut() {
7712 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7713 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7716 // We expect all skimmed fees to be consumed above
7717 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7721 context: ChannelContext {
7724 config: config.unwrap(),
7728 // Note that we don't care about serializing handshake limits as we only ever serialize
7729 // channel data after the handshake has completed.
7730 inbound_handshake_limits_override: None,
7733 temporary_channel_id,
7735 announcement_sigs_state: announcement_sigs_state.unwrap(),
7737 channel_value_satoshis,
7739 latest_monitor_update_id,
7741 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7742 shutdown_scriptpubkey,
7745 cur_holder_commitment_transaction_number,
7746 cur_counterparty_commitment_transaction_number,
7749 holder_max_accepted_htlcs,
7750 pending_inbound_htlcs,
7751 pending_outbound_htlcs,
7752 holding_cell_htlc_updates,
7756 monitor_pending_channel_ready,
7757 monitor_pending_revoke_and_ack,
7758 monitor_pending_commitment_signed,
7759 monitor_pending_forwards,
7760 monitor_pending_failures,
7761 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7763 signer_pending_commitment_update: false,
7764 signer_pending_funding: false,
7767 holding_cell_update_fee,
7768 next_holder_htlc_id,
7769 next_counterparty_htlc_id,
7770 update_time_counter,
7773 #[cfg(debug_assertions)]
7774 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7775 #[cfg(debug_assertions)]
7776 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7778 last_sent_closing_fee: None,
7779 pending_counterparty_closing_signed: None,
7780 expecting_peer_commitment_signed: false,
7781 closing_fee_limits: None,
7782 target_closing_feerate_sats_per_kw,
7784 funding_tx_confirmed_in,
7785 funding_tx_confirmation_height,
7787 channel_creation_height: channel_creation_height.unwrap(),
7789 counterparty_dust_limit_satoshis,
7790 holder_dust_limit_satoshis,
7791 counterparty_max_htlc_value_in_flight_msat,
7792 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7793 counterparty_selected_channel_reserve_satoshis,
7794 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7795 counterparty_htlc_minimum_msat,
7796 holder_htlc_minimum_msat,
7797 counterparty_max_accepted_htlcs,
7800 counterparty_forwarding_info,
7802 channel_transaction_parameters: channel_parameters,
7803 funding_transaction,
7806 counterparty_cur_commitment_point,
7807 counterparty_prev_commitment_point,
7808 counterparty_node_id,
7810 counterparty_shutdown_scriptpubkey,
7814 channel_update_status,
7815 closing_signed_in_flight: false,
7819 #[cfg(any(test, fuzzing))]
7820 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7821 #[cfg(any(test, fuzzing))]
7822 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7824 workaround_lnd_bug_4006: None,
7825 sent_message_awaiting_response: None,
7827 latest_inbound_scid_alias,
7828 // Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if it's missing
7829 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7831 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7832 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7834 #[cfg(any(test, fuzzing))]
7835 historical_inbound_htlc_fulfills,
7837 channel_type: channel_type.unwrap(),
7840 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7849 use bitcoin::blockdata::constants::ChainHash;
7850 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7851 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7852 use bitcoin::blockdata::opcodes;
7853 use bitcoin::network::constants::Network;
7854 use crate::ln::PaymentHash;
7855 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7856 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7857 use crate::ln::channel::InitFeatures;
7858 use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7859 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7860 use crate::ln::features::ChannelTypeFeatures;
7861 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7862 use crate::ln::script::ShutdownScript;
7863 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7864 use crate::chain::BestBlock;
7865 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7866 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7867 use crate::chain::transaction::OutPoint;
7868 use crate::routing::router::Path;
7869 use crate::util::config::UserConfig;
7870 use crate::util::errors::APIError;
7871 use crate::util::test_utils;
7872 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7873 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7874 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7875 use bitcoin::secp256k1::{SecretKey,PublicKey};
7876 use bitcoin::hashes::sha256::Hash as Sha256;
7877 use bitcoin::hashes::Hash;
7878 use bitcoin::hashes::hex::FromHex;
7879 use bitcoin::hash_types::WPubkeyHash;
7880 use bitcoin::blockdata::locktime::absolute::LockTime;
7881 use bitcoin::address::{WitnessProgram, WitnessVersion};
7882 use crate::prelude::*;
7884 struct TestFeeEstimator {
7887 impl FeeEstimator for TestFeeEstimator {
7888 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7894 fn test_max_funding_satoshis_no_wumbo() {
7895 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7896 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7897 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7901 signer: InMemorySigner,
7904 impl EntropySource for Keys {
7905 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7908 impl SignerProvider for Keys {
7909 type EcdsaSigner = InMemorySigner;
7911 type TaprootSigner = InMemorySigner;
7913 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7914 self.signer.channel_keys_id()
7917 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
7921 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
7923 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
7924 let secp_ctx = Secp256k1::signing_only();
7925 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7926 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7927 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
7930 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7931 let secp_ctx = Secp256k1::signing_only();
7932 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7933 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7937 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7938 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7939 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
7943 fn upfront_shutdown_script_incompatibility() {
7944 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7945 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
7946 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
7949 let seed = [42; 32];
7950 let network = Network::Testnet;
7951 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7952 keys_provider.expect(OnGetShutdownScriptpubkey {
7953 returns: non_v0_segwit_shutdown_script.clone(),
7956 let secp_ctx = Secp256k1::new();
7957 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7958 let config = UserConfig::default();
7959 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
7960 Err(APIError::IncompatibleShutdownScript { script }) => {
7961 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7963 Err(e) => panic!("Unexpected error: {:?}", e),
7964 Ok(_) => panic!("Expected error"),
7968 // Check that, during channel creation, we use the same feerate in the open channel message
7969 // as we do in the Channel object creation itself.
7971 fn test_open_channel_msg_fee() {
7972 let original_fee = 253;
7973 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7974 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7975 let secp_ctx = Secp256k1::new();
7976 let seed = [42; 32];
7977 let network = Network::Testnet;
7978 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7980 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7981 let config = UserConfig::default();
7982 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
7984 // Now change the fee so we can check that the fee in the open_channel message is the
7985 // same as the old fee.
7986 fee_est.fee_est = 500;
7987 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7988 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7992 fn test_holder_vs_counterparty_dust_limit() {
7993 // Test that when calculating the local and remote commitment transaction fees, the correct
7994 // dust limits are used.
7995 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7996 let secp_ctx = Secp256k1::new();
7997 let seed = [42; 32];
7998 let network = Network::Testnet;
7999 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8000 let logger = test_utils::TestLogger::new();
8001 let best_block = BestBlock::from_network(network);
8003 // Go through the flow of opening a channel between two nodes, making sure
8004 // they have different dust limits.
8006 // Create Node A's channel pointing to Node B's pubkey
8007 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8008 let config = UserConfig::default();
8009 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8011 // Create Node B's channel by receiving Node A's open_channel message
8012 // Make sure A's dust limit is as we expect.
8013 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8014 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8015 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8017 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8018 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8019 accept_channel_msg.dust_limit_satoshis = 546;
8020 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8021 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8023 // Node A --> Node B: funding created
8024 let output_script = node_a_chan.context.get_funding_redeemscript();
8025 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8026 value: 10000000, script_pubkey: output_script.clone(),
8028 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8029 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8030 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8032 // Node B --> Node A: funding signed
8033 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8035 // Put some inbound and outbound HTLCs in A's channel.
8036 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8037 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8039 amount_msat: htlc_amount_msat,
8040 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8041 cltv_expiry: 300000000,
8042 state: InboundHTLCState::Committed,
8045 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8047 amount_msat: htlc_amount_msat, // put an amount below A's dust limit but above B's.
8048 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8049 cltv_expiry: 200000000,
8050 state: OutboundHTLCState::Committed,
8051 source: HTLCSource::OutboundRoute {
8052 path: Path { hops: Vec::new(), blinded_tail: None },
8053 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8054 first_hop_htlc_msat: 548,
8055 payment_id: PaymentId([42; 32]),
8057 skimmed_fee_msat: None,
8060 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8061 // the dust limit check.
8062 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8063 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8064 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8065 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8067 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8068 // of the HTLCs are seen to be above the dust limit.
8069 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8070 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8071 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8072 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8073 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8077 fn test_timeout_vs_success_htlc_dust_limit() {
8078 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8079 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8080 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8081 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8082 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8083 let secp_ctx = Secp256k1::new();
8084 let seed = [42; 32];
8085 let network = Network::Testnet;
8086 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8088 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8089 let config = UserConfig::default();
8090 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8092 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8093 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
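// An offered (outgoing) HTLC's effective dust threshold is the dust limit
// plus the fee of an HTLC-timeout transaction at the current feerate, i.e.
// weight * feerate / 1000 sats; a received HTLC uses the heavier
// HTLC-success transaction instead. E.g. at 253 sat/kW, assuming the
// standard non-anchor HTLC-timeout weight of 663: 253 * 663 / 1000 = 167
// sats on top of the dust limit.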
8095 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped, this HTLC would be
8096 // counted as dust when it shouldn't be.
8097 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8098 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8099 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8100 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8102 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8103 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8104 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8105 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8106 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8108 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8110 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8111 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8112 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8113 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8114 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8116 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8117 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8118 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8119 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8120 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8124 fn channel_reestablish_no_updates() {
8125 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8126 let logger = test_utils::TestLogger::new();
8127 let secp_ctx = Secp256k1::new();
8128 let seed = [42; 32];
8129 let network = Network::Testnet;
8130 let best_block = BestBlock::from_network(network);
8131 let chain_hash = ChainHash::using_genesis_block(network);
8132 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8134 // Go through the flow of opening a channel between two nodes.
8136 // Create Node A's channel pointing to Node B's pubkey
8137 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8138 let config = UserConfig::default();
8139 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8141 // Create Node B's channel by receiving Node A's open_channel message
8142 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8143 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8144 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8146 // Node B --> Node A: accept channel
8147 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8148 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8150 // Node A --> Node B: funding created
8151 let output_script = node_a_chan.context.get_funding_redeemscript();
8152 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8153 value: 10000000, script_pubkey: output_script.clone(),
8155 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8156 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8157 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8159 // Node B --> Node A: funding signed
8160 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8162 // Now disconnect the two nodes and check that the commitment point in
8163 // Node B's channel_reestablish message is sane.
8164 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8165 let msg = node_b_chan.get_channel_reestablish(&&logger);
8166 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8167 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8168 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
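// With only the initial commitment exchanged, the next commitment number is
// 1, no revocations have happened yet (next_remote_commitment_number = 0),
// and with no revocation received the last per-commitment secret is still
// the all-zero placeholder.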
8170 // Check that the commitment point in Node A's channel_reestablish message is sane.
8172 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8173 let msg = node_a_chan.get_channel_reestablish(&&logger);
8174 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8175 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8176 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8180 fn test_configured_holder_max_htlc_value_in_flight() {
8181 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8182 let logger = test_utils::TestLogger::new();
8183 let secp_ctx = Secp256k1::new();
8184 let seed = [42; 32];
8185 let network = Network::Testnet;
8186 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8187 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8188 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8190 let mut config_2_percent = UserConfig::default();
8191 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8192 let mut config_99_percent = UserConfig::default();
8193 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8194 let mut config_0_percent = UserConfig::default();
8195 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8196 let mut config_101_percent = UserConfig::default();
8197 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8199 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8200 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8201 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8202 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8203 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8204 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
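		// Note that the in-flight cap is purely local policy: each side derives it from its
		// own `max_inbound_htlc_value_in_flight_percent_of_channel`, not from the peer's.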
		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}

	#[test]
	fn test_configured_holder_selected_channel_reserve_satoshis() {

		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

		// Test with valid but unreasonably high channel reserves:
		// the opening and accepting parties request 49%/49% and 60%/30% channel reserves.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

		// Test with a calculated channel reserve less than the lower bound,
		// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
		test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
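		// (100_000 sat * 0.00002 = 2 sat, which is below `MIN_THEIR_CHAN_RESERVE_SATOSHIS`,
		// so the helper's `cmp::max` floors the reserve at the minimum.)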
		// Test with invalid channel reserves since the sum of both is greater than or equal
		// to the channel value.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	}

	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
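			// The inbound node enforces the reserve it selected for the counterparty, and
			// records the reserve the outbound node selected for it via `open_channel`.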
		} else {
			// Channel negotiation fails: the combined reserves meet or exceed the channel value.
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}

	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message.
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;
		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
		// Make sure that receiving a channel update will update the Channel as expected.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());
		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}
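		// `channel_update` returns `true` only when the stored forwarding info actually
		// changed; re-applying the identical update is a no-op and returns `false`.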
		assert!(!node_a_chan.channel_update(&update).unwrap());
	}

	#[cfg(feature = "_test_vectors")]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;
		// Test vectors from BOLT 3 Appendices C and F (anchors):
		let feeest = TestFeeEstimator{fee_est: 15000};
		let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();
		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };
		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}
		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());
				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}
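					// (The test vectors use the preimages [0x00; 32] through [0x04; 32], so the
					// loop above recovers a received HTLC's preimage by hashing each candidate.)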
					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}
		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
				"3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
				"30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
				"30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});
		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;

		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
				"304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  { 0,
				  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
				  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
				  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

				  { 1,
				  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
				  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
				  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

				  { 2,
				  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
				  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
				  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

				  { 3,
				  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
				  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
				  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

				  { 4,
				  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
				  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
				  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
		// commitment tx with seven outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 647;

		test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
				"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  { 0,
				  "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
				  "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
				  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

				  { 1,
				  "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
				  "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
				  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

				  { 2,
				  "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
				  "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
				  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

				  { 3,
				  "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
				  "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
				  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

				  { 4,
				  "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
				  "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
				  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
		// commitment tx with six outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 648;

		test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
				"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  { 0,
				  "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
				  "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
				  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

				  { 1,
				  "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
				  "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
				  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

				  { 2,
				  "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
				  "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
				  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

				  { 3,
				  "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
				  "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
				  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
		// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 645;
		chan.context.holder_dust_limit_satoshis = 1001;

		test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
				"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
				"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  { 0,
				  "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
				  "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
				  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

				  { 1,
				  "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
				  "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
				  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

				  { 2,
				  "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
				  "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
				  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

				  { 3,
				  "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
				  "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
				  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
		} );
8788 // commitment tx with six outputs untrimmed (maximum feerate)
8789 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8790 chan.context.feerate_per_kw = 2069;
8791 chan.context.holder_dust_limit_satoshis = 546;
8793 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8794 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8795 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8798 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8799 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8800 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8803 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8804 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8805 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8808 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8809 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8810 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8813 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8814 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8815 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
8822 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8823 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8824 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8827 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8828 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8829 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8832 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8833 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8834 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8837 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8838 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8839 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
8846 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8847 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8848 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8851 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8852 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8853 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8856 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8857 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8858 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8861 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8862 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8863 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
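// The offered-HTLC counterpart of the boundary arithmetic above: offered HTLCs
// are trimmed against the HTLC-timeout weight (663 per BOLT 3, non-anchors),
// so at 2194 sat/kW the 2000 sat offered HTLC keeps exactly
// 2000 - floor(2194 * 663 / 1000) = 546 sat, while at 2195 it falls below the
// dust limit, leaving four outputs.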
8870 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8871 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8872 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8875 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8876 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8877 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8880 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8881 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8882 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
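// With `anchors_zero_fee_htlc_tx` the second-stage HTLC transactions pay no
// fee, so whether an HTLC output is trimmed depends only on the dust limit.
// That is why these anchors variants sweep holder_dust_limit_satoshis
// (2001/3001/4001) rather than feerate_per_kw: at 2001 sat both 2000 sat HTLCs
// are trimmed, leaving the "four outputs" case (plus the two 330 sat anchor
// outputs).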
8892 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8893 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8894 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8897 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8898 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8899 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8902 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8903 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8904 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
8913 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8914 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8915 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8918 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8919 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8920 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8923 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8924 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8925 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
8932 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8933 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8934 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8937 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8938 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8939 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8948 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8949 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8950 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8953 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8954 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8955 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
8964 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8965 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8966 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8969 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8970 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8971 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
8979 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8980 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8981 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8989 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8990 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8991 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
8999 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9000 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9001 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
9007 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9008 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9009 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9017 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9018 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9019 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with fee greater than funder amount
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type;
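// Note: the signatures and transaction below are byte-for-byte identical to
// the "one output untrimmed (minimum feerate)" vector above. Because the
// commitment fee comes out of the funder's output, it is effectively capped
// once that output is exhausted, so further feerate increases no longer change
// the transaction.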
9027 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9028 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9029 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.context.feerate_per_kw = 253;
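// The two offered HTLCs below share a payment hash and round to the same
// 5000 sat output value, so their commitment outputs have identical scripts
// and values. BOLT 3 breaks that tie by ordering the outputs by increasing
// cltv_expiry, which is what this vector exercises (note the 505 vs 506
// locktimes on the two HTLC-timeout transactions).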
chan.context.pending_inbound_htlcs.clear();
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		// htlc_id/cltv_expiry restored to match the BOLT 3 Appendix C vectors
		// (HTLC 1: 2000000 msat received, expiry 501).
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.clear();
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		// htlc_id/cltv_expiry restored to match the BOLT 3 Appendix C vectors
		// (HTLC 6: 5000001 msat offered, expiry 506).
		htlc_id: 6,
		amount_msat: 5000001,
		cltv_expiry: 506,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		// htlc_id/cltv_expiry restored to match the BOLT 3 Appendix C vectors
		// (HTLC 5: 5000000 msat offered, expiry 505).
		htlc_id: 5,
		amount_msat: 5000000,
		cltv_expiry: 505,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
	out
});
9074 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9075 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9076 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9079 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9080 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9081 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9083 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9084 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9085 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9087 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9088 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9089 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
} );

chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9093 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9094 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9095 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9098 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9099 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9100 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9102 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9103 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9104 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9106 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9107 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9108 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
} );
}

#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:
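	// The scheme under test: start from the seed and, for each bit B from 47
	// down to 0 that is set in the commitment index, flip bit B and SHA256 the
	// result. A minimal sketch of the BOLT 3 `generate_from_seed` loop that
	// `chan_utils::build_commitment_secret` implements:
	//
	//     let mut res = *seed;
	//     for bitpos in (0..48).rev() {
	//         if idx & (1u64 << bitpos) != 0 {
	//             res[bitpos / 8] ^= 1 << (bitpos & 7);
	//             res = Sha256::hash(&res).to_byte_array();
	//         }
	//     }
	//     res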
	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
	           <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
	           <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
	           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}

#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
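	// For reference, the BOLT 3 derivations these vectors exercise:
	//   pubkey           = basepoint + SHA256(per_commitment_point || basepoint) * G
	//   privkey          = basepoint_secret + SHA256(per_commitment_point || basepoint)
	//   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
	//                      + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
	// (with the corresponding scalar form for the revocation private key).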
	let secp_ctx = Secp256k1::new();

	let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
	           SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

	assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
	           <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
	           SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}

#[test]
fn test_zero_conf_channel_type_support() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();
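
	// One nuance this exercises: requesting zero-conf via an explicit
	// `channel_type` only negotiates the channel *type*. Actually treating the
	// channel as usable pre-confirmation is a separate acceptance decision,
	// which is why the inbound side below is constructed with
	// `is_0conf=false` and the open is still accepted.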
	let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(channel_type_features);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
	assert!(res.is_ok());
	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
		// sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
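		// Note: channel types are expressed with the *required* (even) variants of the
		// feature bits, which is why the `_required` setters are used to build the
		// expected `channel_type` here.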
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}
	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
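		// Per BOLT 9, bit 12 is the required (even) bit for `option_static_remotekey` and
		// bit 20 the required bit for the legacy `option_anchors` ("simple anchors") feature;
		// OR-ing them yields an `init` feature set advertising both as required.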
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}
	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());
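		// The asserts above confirm these bits are *known* to LDK, so the rejections tested
		// below come from the unsupported legacy anchors type itself, not from generic
		// unknown-feature handling.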
		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());
		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
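		// BOLT 2 requires `accept_channel` to echo the `channel_type` from `open_channel`
		// unchanged, so A must treat B's substituted legacy anchors type as an error.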
		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}
	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;
		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![TxOut {
				value: 10000000, script_pubkey: output_script.clone(),
			}, TxOut {
				value: 10000000, script_pubkey: Builder::new().into_script(),
			}],
		};
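		// The second output stands in for the funding output of another channel sharing this
		// batch transaction; only output 0 (the funding outpoint below) funds the channel
		// under test.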
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(),
			funding_outpoint,
			true, // is_batch_funding
			&&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// `ChannelState::WaitingForBatch` is only cleared when the ChannelManager calls
		// `set_batch_ready`, i.e. once every channel in the batch is ready.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}