// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 4,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 8,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	ChannelReady = 64,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating the remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 4096,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
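
// A minimal sanity sketch (ours, not part of upstream) showing how the state/flag split
// composes: flag bits can be OR'd onto a base state, and masking with `!STATE_FLAGS`
// recovers the base state again.
#[cfg(test)]
mod channel_state_flag_sanity {
	use super::*;

	#[test]
	fn masks_compose_as_documented() {
		// Both shutdown flags together form the shutdown mask.
		assert_eq!(BOTH_SIDES_SHUTDOWN_MASK,
			ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32);
		// A state word with `ChannelReady` flags set still compares as `FundingSent` once
		// the flag bits are masked off.
		let state = ChannelState::FundingSent as u32
			| ChannelState::TheirChannelReady as u32
			| ChannelState::OurChannelReady as u32;
		assert_eq!(state & !STATE_FLAGS, ChannelState::FundingSent as u32);
	}
}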
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
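
// Illustrative sketch (ours, not upstream): the expected commitment transaction weight is
// the base weight for the negotiated channel type plus a fixed weight per non-dust HTLC.
#[cfg(test)]
mod commitment_weight_example {
	use super::*;

	#[test]
	fn weight_scales_linearly_with_htlcs() {
		// A non-anchor channel type with two non-dust HTLCs on the commitment transaction.
		let non_anchor = ChannelTypeFeatures::only_static_remote_key();
		let weight = commitment_tx_base_weight(&non_anchor) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC;
		assert_eq!(weight, 724 + 2 * 172);
	}
}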
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;
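
// Quick sanity sketch (ours, not upstream): the non-wumbo funding cap is 2^24 - 1 satoshis,
// a tiny fraction of total supply.
#[cfg(test)]
mod funding_cap_sanity {
	use super::*;

	#[test]
	fn non_wumbo_cap_is_24_bits() {
		assert_eq!(MAX_FUNDING_SATOSHIS_NO_WUMBO, 16_777_215);
		assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO < TOTAL_BITCOIN_SUPPLY_SATOSHIS);
	}
}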
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}
impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
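
// Usage sketch (ours, not upstream): `secp_check!` unwraps a secp256k1 `Result`, mapping
// any error to `ChannelError::Close` with the supplied message. The function name here is
// hypothetical, for illustration only.
#[cfg(test)]
#[allow(dead_code)]
fn secp_check_usage_example() -> Result<PublicKey, ChannelError> {
	let secp_ctx = Secp256k1::new();
	// `from_slice` fails on invalid key material; `secp_check!` converts that failure
	// into an early `Err(ChannelError::Close(..))` return.
	let sk = secp_check!(SecretKey::from_slice(&[0x42; 32]), "bad key material".to_owned());
	Ok(PublicKey::from_secret_key(&secp_ctx, &sk))
}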
422 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
423 /// our counterparty or not. However, we don't want to announce updates right away to avoid
424 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
425 /// our channel_update message and track the current state here.
426 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
427 #[derive(Clone, Copy, PartialEq)]
428 pub(super) enum ChannelUpdateStatus {
429 /// We've announced the channel as enabled and are connected to our peer.
431 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
433 /// Our channel is live again, but we haven't announced the channel as enabled yet.
435 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub funding_created: Option<msgs::FundingCreated>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
/// If the majority of the channel's funds belong to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee they may have no
/// balance to pay for it, and the fundee is unable to send a payment as the increase in fee more
/// than drains their reserve value. Thus, neither side can send a new HTLC and the channel becomes
/// useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update, per `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond by the time this counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
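
// Usage sketch (ours, not upstream): the age check is intentionally "tick then test", so a
// freshly-created channel survives `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1` full ticks before
// expiring on the next one.
#[cfg(test)]
mod unfunded_age_example {
	use super::*;

	#[test]
	fn expires_only_after_age_limit_ticks() {
		let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
		for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
			assert!(!ctx.should_expire_unfunded_channel());
		}
		assert!(ctx.should_expire_unfunded_channel());
	}
}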
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsState` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.
	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay the close indefinitely.
	///
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}
	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}
	/// Returns the state of the channel in its various stages of shutdown.
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
			return ChannelShutdownState::ShutdownComplete;
		}
		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
			return ChannelShutdownState::ShutdownInitiated;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
			return ChannelShutdownState::ResolvingHTLCs;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
			return ChannelShutdownState::NegotiatingClosingFee;
		}
		return ChannelShutdownState::NotShuttingDown;
	}
	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() &&
		self.pending_outbound_htlcs.is_empty() &&
		self.pending_update_fee.is_none() &&
		self.channel_state &
			(BOTH_SIDES_SHUTDOWN_MASK |
			 ChannelState::AwaitingRemoteRevoke as u32 |
			 ChannelState::PeerDisconnected as u32 |
			 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
	}
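
	// Worked example (ours, illustrative only): a state word with both shutdown flags set and
	// none of the blocking flags (disconnect/monitor-update/awaiting-revoke) satisfies the
	// mask equation used above, so closing negotiation can proceed.
	#[cfg(test)]
	#[allow(dead_code)]
	fn closing_negotiation_mask_example() {
		let state = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		let blocking = BOTH_SIDES_SHUTDOWN_MASK
			| ChannelState::AwaitingRemoteRevoke as u32
			| ChannelState::PeerDisconnected as u32
			| ChannelState::MonitorUpdateInProgress as u32;
		assert_eq!(state & blocking, BOTH_SIDES_SHUTDOWN_MASK);
	}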
	/// Returns true if this channel is currently available for use. This is a stricter check
	/// than is_usable(), additionally considering things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}

	// Public utilities:
	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}

	/// Returns the `temporary_channel_id` used during channel establishment.
	///
	/// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}

	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}

	/// Gets the "user_id" value passed into the construction of this channel. It has no special
	/// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}
1078 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1082 /// Gets the channel's `short_channel_id`.
1084 /// Will return `None` if the channel hasn't been confirmed yet.
1085 pub fn get_short_channel_id(&self) -> Option<u64> {
1086 self.short_channel_id
1089 /// Allowed in any state (including after shutdown)
1090 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1091 self.latest_inbound_scid_alias
1094 /// Allowed in any state (including after shutdown)
1095 pub fn outbound_scid_alias(&self) -> u64 {
1096 self.outbound_scid_alias
	/// Returns the holder signer for this channel.
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.holder_signer
	}

	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}

	/// Returns the height at which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
		let conf_height = self.funding_tx_confirmation_height;
		if conf_height > 0 {
			Some(conf_height)
		} else {
			None
		}
	}

	/// Returns the block hash in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}

	/// Returns the current number of confirmations on the funding transaction.
	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
		if self.funding_tx_confirmation_height == 0 {
			// We either haven't seen any confirmation yet, or observed a reorg.
			return 0;
		}

		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	}
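
	// Worked example (ours, illustrative only): with the funding transaction confirmed at
	// height 105 and the chain tip at height 110, the confirmation block itself counts,
	// giving 6 confirmations.
	#[cfg(test)]
	#[allow(dead_code)]
	fn funding_confirmations_worked_example() {
		let funding_tx_confirmation_height: u32 = 105;
		let height: u32 = 110;
		let confs = height.checked_sub(funding_tx_confirmation_height).map_or(0, |c| c + 1);
		assert_eq!(confs, 6);
	}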
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}
	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_announced_htlc_max_msat(&self) -> u64 {
		return cmp::min(
			// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
			// to use full capacity. This is an effort to reduce routing failures, because in many cases
			// the channel might have been used to route very small values (either by honest users or as DoS).
			self.channel_value_satoshis * 1000 * 9 / 10,
			self.counterparty_max_htlc_value_in_flight_msat
		);
	}
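
	// Worked example (ours, illustrative only): for a 1_000_000 sat channel whose counterparty
	// allows up to 800_000_000 msat in flight, we announce min(900_000_000, 800_000_000) msat.
	#[cfg(test)]
	#[allow(dead_code)]
	fn announced_htlc_max_worked_example() {
		let channel_value_satoshis: u64 = 1_000_000;
		let counterparty_max_htlc_value_in_flight_msat: u64 = 800_000_000;
		let announced = cmp::min(
			channel_value_satoshis * 1000 * 9 / 10,
			counterparty_max_htlc_value_in_flight_msat);
		assert_eq!(announced, 800_000_000);
	}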
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}

	fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
		self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
			let holder_reserve = self.holder_selected_channel_reserve_satoshis;
			cmp::min(
				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
				party_max_htlc_value_in_flight_msat
			)
		})
	}
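
	// Worked example (ours, illustrative only): with a 1_000_000 sat channel, a 10_000 sat
	// counterparty reserve and a 10_000 sat holder reserve, the HTLC maximum is capped at
	// (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, or the in-flight limit if
	// that is lower.
	#[cfg(test)]
	#[allow(dead_code)]
	fn htlc_maximum_worked_example() {
		let channel_value_satoshis: u64 = 1_000_000;
		let counterparty_reserve: u64 = 10_000;
		let holder_reserve: u64 = 10_000;
		let party_max_htlc_value_in_flight_msat: u64 = 1_000_000_000;
		let max = cmp::min(
			(channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
			party_max_htlc_value_in_flight_msat);
		assert_eq!(max, 980_000_000);
	}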
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}

	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}

	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}
	pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
		fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
	where F::Target: FeeEstimator
	{
		match self.config.options.max_dust_htlc_exposure {
			MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
				let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
					ConfirmationTarget::OnChainSweep) as u64;
				feerate_per_kw.saturating_mul(multiplier)
			},
			MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
		}
	}
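
	// Worked example (ours, illustrative only): with `MaxDustHTLCExposure::FeeRateMultiplier(10_000)`
	// and an estimated feerate of 2_500 sat/kW, the allowed dust exposure is
	// 2_500 * 10_000 = 25_000_000 msat.
	#[cfg(test)]
	#[allow(dead_code)]
	fn dust_exposure_worked_example() {
		let feerate_per_kw: u64 = 2_500;
		let multiplier: u64 = 10_000;
		assert_eq!(feerate_per_kw.saturating_mul(multiplier), 25_000_000);
	}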
	/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
	pub fn prev_config(&self) -> Option<ChannelConfig> {
		self.prev_config.map(|prev_config| prev_config.0)
	}

	// Checks whether we should emit a `ChannelPending` event.
	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
		self.is_funding_broadcast() && !self.channel_pending_event_emitted
	}

	// Returns whether we already emitted a `ChannelPending` event.
	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
		self.channel_pending_event_emitted
	}

	// Remembers that we already emitted a `ChannelPending` event.
	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
		self.channel_pending_event_emitted = true;
	}

	// Checks whether we should emit a `ChannelReady` event.
	pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
		self.is_usable() && !self.channel_ready_event_emitted
	}

	// Remembers that we already emitted a `ChannelReady` event.
	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
		self.channel_ready_event_emitted = true;
	}
1264 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1265 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1266 /// no longer be considered when forwarding HTLCs.
1267 pub fn maybe_expire_prev_config(&mut self) {
1268 if self.prev_config.is_none() {
1271 let prev_config = self.prev_config.as_mut().unwrap();
1272 prev_config.1 += 1;
1273 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1274 self.prev_config = None;
1278 /// Returns the current [`ChannelConfig`] applied to the channel.
1279 pub fn config(&self) -> ChannelConfig {
1280 self.config.options
1283 /// Updates the channel's config. Returns a bool indicating whether the applied config
1284 /// update resulted in a new ChannelUpdate message.
1285 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1286 let did_channel_update =
1287 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1288 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1289 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1290 if did_channel_update {
1291 self.prev_config = Some((self.config.options, 0));
1292 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1293 // policy change to propagate throughout the network.
1294 self.update_time_counter += 1;
1296 self.config.options = *config;
1300 /// Returns true if funding_signed was sent/received and the
1301 /// funding transaction has been broadcast if necessary.
1302 pub fn is_funding_broadcast(&self) -> bool {
1303 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1304 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1307 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1308 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1309 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1310 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1311 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1312 /// an HTLC to it).
1313 /// @local is used only to convert relevant internal structures which refer to remote vs local
1314 /// to decide the value of outputs and the direction of HTLCs.
1315 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1316 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1317 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1318 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1319 /// which peer generated this transaction and "to whom" this transaction flows.
1321 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1322 where L::Target: Logger
1324 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1325 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1326 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1328 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1329 let mut remote_htlc_total_msat = 0;
1330 let mut local_htlc_total_msat = 0;
1331 let mut value_to_self_msat_offset = 0;
1333 let mut feerate_per_kw = self.feerate_per_kw;
1334 if let Some((feerate, update_state)) = self.pending_update_fee {
1335 if match update_state {
1336 // Note that these match the inclusion criteria when scanning
1337 // pending_inbound_htlcs below.
1338 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1339 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1340 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1341 } {
1342 feerate_per_kw = feerate;
1346 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1347 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1348 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1350 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1352 macro_rules! get_htlc_in_commitment {
1353 ($htlc: expr, $offered: expr) => {
1354 HTLCOutputInCommitment {
1355 offered: $offered,
1356 amount_msat: $htlc.amount_msat,
1357 cltv_expiry: $htlc.cltv_expiry,
1358 payment_hash: $htlc.payment_hash,
1359 transaction_output_index: None
1364 macro_rules! add_htlc_output {
1365 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1366 if $outbound == local { // "offered HTLC output"
1367 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1368 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1369 0
1370 } else {
1371 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1373 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1374 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1375 included_non_dust_htlcs.push((htlc_in_tx, $source));
1377 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1378 included_dust_htlcs.push((htlc_in_tx, $source));
1381 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1382 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1383 0
1384 } else {
1385 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1387 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1388 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1389 included_non_dust_htlcs.push((htlc_in_tx, $source));
1391 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1392 included_dust_htlcs.push((htlc_in_tx, $source));
1398 for ref htlc in self.pending_inbound_htlcs.iter() {
1399 let (include, state_name) = match htlc.state {
1400 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1401 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1402 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1403 InboundHTLCState::Committed => (true, "Committed"),
1404 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1408 add_htlc_output!(htlc, false, None, state_name);
1409 remote_htlc_total_msat += htlc.amount_msat;
1411 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1412 match &htlc.state {
1413 &InboundHTLCState::LocalRemoved(ref reason) => {
1414 if generated_by_local {
1415 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1416 value_to_self_msat_offset += htlc.amount_msat as i64;
1425 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1427 for ref htlc in self.pending_outbound_htlcs.iter() {
1428 let (include, state_name) = match htlc.state {
1429 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1430 OutboundHTLCState::Committed => (true, "Committed"),
1431 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1432 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1433 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1436 let preimage_opt = match htlc.state {
1437 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1438 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1439 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1440 _ => None,
1441 };
1443 if let Some(preimage) = preimage_opt {
1444 preimages.push(preimage);
1448 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1449 local_htlc_total_msat += htlc.amount_msat;
1451 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1452 match htlc.state {
1453 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1454 value_to_self_msat_offset -= htlc.amount_msat as i64;
1456 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1457 if !generated_by_local {
1458 value_to_self_msat_offset -= htlc.amount_msat as i64;
1466 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1467 assert!(value_to_self_msat >= 0);
1468 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1469 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1470 // "violate" their reserve value by couting those against it. Thus, we have to convert
1471 // everything to i64 before subtracting as otherwise we can overflow.
1472 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1473 assert!(value_to_remote_msat >= 0);
1475 #[cfg(debug_assertions)]
1477 // Make sure that the to_self/to_remote is always either past the appropriate
1478 // channel_reserve *or* it is making progress towards it.
1479 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1480 self.holder_max_commitment_tx_output.lock().unwrap()
1481 } else {
1482 self.counterparty_max_commitment_tx_output.lock().unwrap()
1484 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1485 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1486 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1487 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1490 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1491 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1492 let (value_to_self, value_to_remote) = if self.is_outbound() {
1493 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1494 } else {
1495 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1498 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1499 let mut value_to_b = if local { value_to_remote } else { value_to_self };
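// Illustrative, not from upstream (the 1_124 WU anchor base weight and 330 sat
// anchor value are assumed constants): on an anchor channel at 2_500 sat/kW with
// one non-dust HTLC, total_fee_sat = (1_124 + 172) * 2_500 / 1000 = 3_240 sat and
// anchors_val = 2 * 330 = 660 sat, both deducted from the funder's output alone.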
1500 let (funding_pubkey_a, funding_pubkey_b) = if local {
1501 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1502 } else {
1503 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1506 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1507 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1512 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1513 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1518 let num_nondust_htlcs = included_non_dust_htlcs.len();
1520 let channel_parameters =
1521 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1522 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1523 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1530 &mut included_non_dust_htlcs,
1533 let mut htlcs_included = included_non_dust_htlcs;
1534 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1535 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1536 htlcs_included.append(&mut included_dust_htlcs);
1538 // For the stats, trim the balances to 0 msat when they are below the broadcaster's dust limit
1539 value_to_self_msat = if value_to_self_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_self_msat };
1540 value_to_remote_msat = if value_to_remote_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_remote_msat };
1542 CommitmentStats {
1543 tx,
1544 feerate_per_kw,
1545 total_fee_sat,
1546 num_nondust_htlcs,
1547 htlcs_included,
1548 local_balance_msat: value_to_self_msat as u64,
1549 remote_balance_msat: value_to_remote_msat as u64,
1555 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1556 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1557 /// our counterparty!)
1558 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1559 /// TODO Some magic rust shit to compile-time check this?
1560 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1561 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1562 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1563 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1564 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1566 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1570 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1571 /// will sign and send to our counterparty.
1572 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1573 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1574 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1575 //may see payments to it!
1576 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1577 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1578 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1580 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1583 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1584 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1585 /// Panics if called before accept_channel/InboundV1Channel::new
1586 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1587 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1590 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1591 &self.get_counterparty_pubkeys().funding_pubkey
1594 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1595 self.feerate_per_kw
1598 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1599 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1600 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1601 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1602 // more dust balance if the feerate increases when we have several HTLCs pending
1603 // which are near the dust limit.
1604 let mut feerate_per_kw = self.feerate_per_kw;
1605 // If there's a pending update fee, use it to ensure we aren't under-estimating
1606 // potential feerate updates coming soon.
1607 if let Some((feerate, _)) = self.pending_update_fee {
1608 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1610 if let Some(feerate) = outbound_feerate_update {
1611 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1613 cmp::max(2530, feerate_per_kw * 1250 / 1000)
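// Illustrative, not from upstream: at a current feerate of 1_000 sat/kW this
// returns max(2_530, 1_250) = 2_530 sat/kW, while at 30_000 sat/kW it returns
// max(2_530, 37_500) = 37_500 sat/kW, i.e. the +25% branch dominates at high rates.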
1616 /// Get forwarding information for the counterparty.
1617 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1618 self.counterparty_forwarding_info.clone()
1621 /// Returns an HTLCStats about inbound pending htlcs
1622 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1623 let context = &self;
1624 let mut stats = HTLCStats {
1625 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1626 pending_htlcs_value_msat: 0,
1627 on_counterparty_tx_dust_exposure_msat: 0,
1628 on_holder_tx_dust_exposure_msat: 0,
1629 holding_cell_msat: 0,
1630 on_holder_tx_holding_cell_htlcs_count: 0,
1633 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1634 (0, 0)
1635 } else {
1636 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1637 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1638 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1640 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1641 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1642 for ref htlc in context.pending_inbound_htlcs.iter() {
1643 stats.pending_htlcs_value_msat += htlc.amount_msat;
1644 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1645 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1647 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1648 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1654 /// Returns an HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1655 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1656 let context = &self;
1657 let mut stats = HTLCStats {
1658 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1659 pending_htlcs_value_msat: 0,
1660 on_counterparty_tx_dust_exposure_msat: 0,
1661 on_holder_tx_dust_exposure_msat: 0,
1662 holding_cell_msat: 0,
1663 on_holder_tx_holding_cell_htlcs_count: 0,
1666 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1667 (0, 0)
1668 } else {
1669 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1670 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1671 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1673 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1674 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1675 for ref htlc in context.pending_outbound_htlcs.iter() {
1676 stats.pending_htlcs_value_msat += htlc.amount_msat;
1677 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1678 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1680 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1681 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1685 for update in context.holding_cell_htlc_updates.iter() {
1686 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1687 stats.pending_htlcs += 1;
1688 stats.pending_htlcs_value_msat += amount_msat;
1689 stats.holding_cell_msat += amount_msat;
1690 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1691 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1693 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1694 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1696 stats.on_holder_tx_holding_cell_htlcs_count += 1;
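// Illustrative, not from upstream (assumes the BOLT-3 non-anchor HTLC-timeout
// weight of 663 WU): with a 354 sat holder dust limit and a 2_530 sat/kW dust
// buffer feerate, holder_dust_limit_timeout_sat = 354 + 2_530 * 663 / 1000 =
// 2_031 sat, so a 2_000_000 msat outbound HTLC still counts toward our
// on-holder-tx dust exposure despite exceeding the raw dust limit.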
1703 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1704 /// Doesn't bother handling the
1705 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1706 /// corner case properly.
1707 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1708 -> AvailableBalances
1709 where F::Target: FeeEstimator
1711 let context = &self;
1712 // Note that we have to handle overflow due to the above case.
1713 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1714 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1716 let mut balance_msat = context.value_to_self_msat;
1717 for ref htlc in context.pending_inbound_htlcs.iter() {
1718 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1719 balance_msat += htlc.amount_msat;
1722 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1724 let outbound_capacity_msat = context.value_to_self_msat
1725 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1726 .saturating_sub(
1727 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1729 let mut available_capacity_msat = outbound_capacity_msat;
1731 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1732 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1733 } else {
1734 0
1735 };
1736 if context.is_outbound() {
1737 // We should mind channel commit tx fee when computing how much of the available capacity
1738 // can be used in the next htlc. Mirrors the logic in send_htlc.
1740 // The fee depends on whether the amount we will be sending is above dust or not,
1741 // and the answer will in turn change the amount itself — making it a circular
1742 // dependency.
1743 // This complicates the computation around dust-values, up to the one-htlc-value.
1744 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1745 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1746 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1749 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1750 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1751 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1752 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1753 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1754 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1755 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1758 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1759 // value ends up being below dust, we have this fee available again. In that case,
1760 // match the value to right-below-dust.
1761 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1762 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1763 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1764 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1765 debug_assert!(one_htlc_difference_msat != 0);
1766 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1767 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1768 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1769 } else {
1770 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1773 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1774 // sending a new HTLC won't reduce their balance below our reserve threshold.
1775 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1776 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1777 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1780 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1781 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1783 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1784 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1785 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1787 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1788 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1789 // we've selected for them, we can only send dust HTLCs.
1790 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1794 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1796 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1797 // between zero and the remaining dust exposure limit OR above the dust limit.
1798 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1799 // send above the dust limit (as the router can always overpay to meet the dust limit).
1800 let mut remaining_msat_below_dust_exposure_limit = None;
1801 let mut dust_exposure_dust_limit_msat = 0;
1802 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1804 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1805 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1806 } else {
1807 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1808 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1809 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1811 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1812 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1813 remaining_msat_below_dust_exposure_limit =
1814 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1815 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1818 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1819 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1820 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1821 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1822 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1823 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1826 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1827 if available_capacity_msat < dust_exposure_dust_limit_msat {
1828 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1830 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
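// Illustrative, not from upstream: if only 100_000 msat of dust exposure budget
// remains while the buffered dust threshold is, say, 2_031_000 msat, we can send
// either at most 100_000 msat (more dust) or at least 2_031_000 msat (non-dust).
// A single (min, max) pair cannot express both ranges, so when capacity allows we
// advertise the non-dust range by raising next_outbound_htlc_minimum_msat.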
1834 available_capacity_msat = cmp::min(available_capacity_msat,
1835 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1837 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1838 available_capacity_msat = 0;
1841 AvailableBalances {
1842 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1843 - context.value_to_self_msat as i64
1844 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1845 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1846 0) as u64,
1847 outbound_capacity_msat,
1848 next_outbound_htlc_limit_msat: available_capacity_msat,
1849 next_outbound_htlc_minimum_msat,
1854 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1855 let context = &self;
1856 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1859 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1860 /// number of pending HTLCs that are on track to be in our next commitment tx.
1862 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1863 /// `fee_spike_buffer_htlc` is `Some`.
1865 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1866 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1868 /// Dust HTLCs are excluded.
1869 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1870 let context = &self;
1871 assert!(context.is_outbound());
1873 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1874 (0, 0)
1875 } else {
1876 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1877 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1879 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1880 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
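// Illustrative, not from upstream (assumes the BOLT-3 second-stage weights of
// 703 WU for HTLC-success and 663 WU for HTLC-timeout): at 2_500 sat/kW with a
// 354 sat holder dust limit, real_dust_limit_success_sat = 354 + 2_500 * 703 /
// 1000 = 2_111 sat and real_dust_limit_timeout_sat = 354 + 2_500 * 663 / 1000 =
// 2_011 sat.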
1882 let mut addl_htlcs = 0;
1883 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1884 match htlc.origin {
1885 HTLCInitiator::LocalOffered => {
1886 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1887 addl_htlcs += 1;
1888 }
1889 },
1890 HTLCInitiator::RemoteOffered => {
1891 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1892 addl_htlcs += 1;
1897 let mut included_htlcs = 0;
1898 for ref htlc in context.pending_inbound_htlcs.iter() {
1899 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1900 continue
1901 }
1902 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1903 // transaction including this HTLC if it times out before they RAA.
1904 included_htlcs += 1;
1907 for ref htlc in context.pending_outbound_htlcs.iter() {
1908 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1909 continue
1910 }
1911 match htlc.state {
1912 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1913 OutboundHTLCState::Committed => included_htlcs += 1,
1914 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1915 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1916 // transaction won't be generated until they send us their next RAA, which will mean
1917 // dropping any HTLCs in this state.
1922 for htlc in context.holding_cell_htlc_updates.iter() {
1923 match htlc {
1924 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1925 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1926 continue
1927 }
1928 included_htlcs += 1
1929 },
1930 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1931 // ack we're guaranteed to never include them in commitment txs anymore.
1935 let num_htlcs = included_htlcs + addl_htlcs;
1936 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
1937 #[cfg(any(test, fuzzing))]
1938 {
1939 let mut fee = res;
1940 if fee_spike_buffer_htlc.is_some() {
1941 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1943 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1944 + context.holding_cell_htlc_updates.len();
1945 let commitment_tx_info = CommitmentTxInfoCached {
1946 fee,
1947 total_pending_htlcs,
1948 next_holder_htlc_id: match htlc.origin {
1949 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1950 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1952 next_counterparty_htlc_id: match htlc.origin {
1953 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1954 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1956 feerate: context.feerate_per_kw,
1958 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1963 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1964 /// pending HTLCs that are on track to be in their next commitment tx
1966 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1967 /// `fee_spike_buffer_htlc` is `Some`.
1969 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1970 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1972 /// Dust HTLCs are excluded.
1973 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1974 let context = &self;
1975 assert!(!context.is_outbound());
1977 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1978 (0, 0)
1979 } else {
1980 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1981 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1983 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1984 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1986 let mut addl_htlcs = 0;
1987 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1988 match htlc.origin {
1989 HTLCInitiator::LocalOffered => {
1990 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1991 addl_htlcs += 1;
1992 }
1993 },
1994 HTLCInitiator::RemoteOffered => {
1995 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1996 addl_htlcs += 1;
2001 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2002 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
2003 // committed outbound HTLCs, see below.
2004 let mut included_htlcs = 0;
2005 for ref htlc in context.pending_inbound_htlcs.iter() {
2006 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2009 included_htlcs += 1;
2012 for ref htlc in context.pending_outbound_htlcs.iter() {
2013 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2016 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2017 // i.e. if they've responded to us with an RAA after announcement.
2019 OutboundHTLCState::Committed => included_htlcs += 1,
2020 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2021 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2026 let num_htlcs = included_htlcs + addl_htlcs;
2027 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2028 #[cfg(any(test, fuzzing))]
2029 {
2030 let mut fee = res;
2031 if fee_spike_buffer_htlc.is_some() {
2032 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2034 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2035 let commitment_tx_info = CommitmentTxInfoCached {
2036 fee,
2037 total_pending_htlcs,
2038 next_holder_htlc_id: match htlc.origin {
2039 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2040 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2042 next_counterparty_htlc_id: match htlc.origin {
2043 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2044 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2046 feerate: context.feerate_per_kw,
2048 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2053 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2054 where F: Fn() -> Option<O> {
2055 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2056 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2057 f()
2058 } else {
2059 None
2060 }
2063 /// Returns the transaction if there is a pending funding transaction that is yet to be
2064 /// broadcast.
2065 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2066 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2069 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2070 /// broadcast.
2071 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2072 self.if_unbroadcasted_funding(||
2073 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2077 /// Returns whether the channel is funded in a batch.
2078 pub fn is_batch_funding(&self) -> bool {
2079 self.is_batch_funding.is_some()
2082 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2083 /// broadcast.
2084 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2085 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2088 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2089 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2090 /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
2091 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2092 /// immediately (others we will have to allow to time out).
2093 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2094 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2095 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2096 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2097 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2098 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2100 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2101 // return them to fail the payment.
2102 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2103 let counterparty_node_id = self.get_counterparty_node_id();
2104 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2106 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2107 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2112 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2113 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2114 // returning a channel monitor update here would imply a channel monitor update before
2115 // we even registered the channel monitor to begin with, which is invalid.
2116 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2117 // funding transaction, don't return a funding txo (which prevents providing the
2118 // monitor update to the user, even if we return one).
2119 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2120 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2121 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2122 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2123 update_id: self.latest_monitor_update_id,
2124 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2128 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2130 self.channel_state = ChannelState::ShutdownComplete as u32;
2131 self.update_time_counter += 1;
2134 dropped_outbound_htlcs,
2135 unbroadcasted_batch_funding_txid,
2139 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2140 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2141 let counterparty_keys = self.build_remote_transaction_keys();
2142 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2143 let signature = match &self.holder_signer {
2144 // TODO (taproot|arik): move match into calling method for Taproot
2145 ChannelSignerType::Ecdsa(ecdsa) => {
2146 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2147 .map(|(sig, _)| sig).ok()?
2149 // TODO (taproot|arik)
2154 if self.signer_pending_funding {
2155 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2156 self.signer_pending_funding = false;
2159 Some(msgs::FundingCreated {
2160 temporary_channel_id: self.temporary_channel_id.unwrap(),
2161 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2162 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2163 signature,
2165 partial_signature_with_nonce: None,
2167 next_local_nonce: None,
2171 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2172 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2173 let counterparty_keys = self.build_remote_transaction_keys();
2174 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2176 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2177 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2178 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2179 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2181 match &self.holder_signer {
2182 // TODO (arik): move match into calling method for Taproot
2183 ChannelSignerType::Ecdsa(ecdsa) => {
2184 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2185 .map(|(signature, _)| msgs::FundingSigned {
2186 channel_id: self.channel_id(),
2187 signature,
2189 partial_signature_with_nonce: None,
2193 if funding_signed.is_none() {
2194 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2195 self.signer_pending_funding = true;
2196 } else if self.signer_pending_funding {
2197 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2198 self.signer_pending_funding = false;
2201 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2202 (counterparty_initial_commitment_tx, funding_signed)
2204 // TODO (taproot|arik)
2211 // Internal utility functions for channels
2213 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2214 /// `channel_value_satoshis` in msat, set through
2215 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2217 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2219 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2220 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2221 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2222 1
2223 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2224 100
2225 } else {
2226 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2228 channel_value_satoshis * 10 * configured_percent
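// Illustrative, not from upstream: `channel_value_satoshis * 10 * configured_percent`
// equals configured_percent% of the channel value expressed in msat. For example,
// a 1_000_000 sat channel configured at 10% yields 1_000_000 * 10 * 10 =
// 100_000_000 msat.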
2231 /// Returns a minimum channel reserve value the remote needs to maintain,
2232 /// required by us according to the configured or default
2233 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2235 /// Guaranteed to return a value no larger than channel_value_satoshis
2237 /// This is used both for outbound and inbound channels and has a lower bound
2238 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2239 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2240 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2241 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
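// Illustrative, not from upstream: with their_channel_reserve_proportional_millionths
// = 10_000 (i.e. 1%), a 1_000_000 sat channel yields a 10_000 sat reserve. Tiny
// channels are floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS, and the cmp::min ensures
// the reserve never exceeds the channel value itself.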
2244 /// This is for legacy reasons, present for forward-compatibility.
2245 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2246 /// from storage. Hence, we use this function to not persist default values of
2247 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2248 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2249 let (q, _) = channel_value_satoshis.overflowing_div(100);
2250 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2253 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2254 // Note that num_htlcs should not include dust HTLCs.
2256 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2257 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2260 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2261 // Note that num_htlcs should not include dust HTLCs.
2262 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2263 // Note that we need to divide before multiplying to round properly,
2264 // since the lowest denomination of bitcoin on-chain is the satoshi.
2265 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
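// Illustrative, not from upstream (assumes the usual 724 WU non-anchor base weight
// and 172 WU per non-dust HTLC): at 253 sat/kW with two non-dust HTLCs,
// (724 + 2 * 172) * 253 / 1000 = 270 sat, so this returns 270 * 1000 = 270_000 msat.
// Dividing before multiplying keeps the msat result rounded to a whole satoshi.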
2268 // Holder designates channel data owned for the benefit of the user client.
2269 // Counterparty designates channel data owned by the other channel participant entity.
2270 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2271 pub context: ChannelContext<SP>,
2274 #[cfg(any(test, fuzzing))]
2275 struct CommitmentTxInfoCached {
2276 fee: u64,
2277 total_pending_htlcs: usize,
2278 next_holder_htlc_id: u64,
2279 next_counterparty_htlc_id: u64,
2280 feerate: u32,
2283 impl<SP: Deref> Channel<SP> where
2284 SP::Target: SignerProvider,
2285 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2287 fn check_remote_fee<F: Deref, L: Deref>(
2288 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2289 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2290 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2292 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2293 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2294 } else {
2295 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2297 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2298 if feerate_per_kw < lower_limit {
2299 if let Some(cur_feerate) = cur_feerate_per_kw {
2300 if feerate_per_kw > cur_feerate {
2301 log_warn!(logger,
2302 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2303 cur_feerate, feerate_per_kw);
2304 return Ok(());
2307 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2313 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2314 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2315 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2316 // outside of those situations will fail.
2317 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2321 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2322 let mut ret =
2323 (4 + // version
2324 1 + // input count
2325 36 + // prevout
2326 1 + // script length (0)
2327 4 + // sequence
2328 1 + // output count
2329 4 // lock time
2330 )*4 + // * 4 for non-witness parts
2331 2 + // witness marker and flag
2332 1 + // witness element count
2333 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2334 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2335 2*(1 + 71); // two signatures + sighash type flags
2336 if let Some(spk) = a_scriptpubkey {
2337 ret += ((8+1) + // output values and script length
2338 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2340 if let Some(spk) = b_scriptpubkey {
2341 ret += ((8+1) + // output values and script length
2342 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
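// Illustrative, not from upstream: a P2WPKH shutdown script is 22 bytes, so each
// such output adds (8 + 1 + 22) * 4 = 124 weight units to the closing transaction.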
2348 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2349 assert!(self.context.pending_inbound_htlcs.is_empty());
2350 assert!(self.context.pending_outbound_htlcs.is_empty());
2351 assert!(self.context.pending_update_fee.is_none());
2353 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2354 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2355 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2357 if value_to_holder < 0 {
2358 assert!(self.context.is_outbound());
2359 total_fee_satoshis += (-value_to_holder) as u64;
2360 } else if value_to_counterparty < 0 {
2361 assert!(!self.context.is_outbound());
2362 total_fee_satoshis += (-value_to_counterparty) as u64;
2365 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2366 value_to_counterparty = 0;
2369 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2370 value_to_holder = 0;
2373 assert!(self.context.shutdown_scriptpubkey.is_some());
2374 let holder_shutdown_script = self.get_closing_scriptpubkey();
2375 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2376 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2378 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2379 (closing_transaction, total_fee_satoshis)
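// Illustrative, not from upstream: on a 1_000_000 sat channel where we funded the
// channel and value_to_self_msat = 600_000_000, a proposed fee of 500 sat yields a
// 599_500 sat holder output and a 400_000 sat counterparty output; only the
// funder's side pays the closing fee.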
2382 fn funding_outpoint(&self) -> OutPoint {
2383 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2386 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2387 /// entirely.
2389 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2390 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2392 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2393 /// disconnected).
2394 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2395 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2396 where L::Target: Logger {
2397 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2398 // (see equivalent if condition there).
2399 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2400 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2401 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2402 self.context.latest_monitor_update_id = mon_update_id;
2403 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2404 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2408 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2409 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2410 // caller thought we could have something claimed (because we wouldn't have accepted an
2411 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2412 // hence the assert_eq below.
2413 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2414 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2416 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2418 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2419 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2420 // these, but for now we just have to treat them as normal.
2422 let mut pending_idx = core::usize::MAX;
2423 let mut htlc_value_msat = 0;
2424 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2425 if htlc.htlc_id == htlc_id_arg {
2426 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2427 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2428 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2429 match htlc.state {
2430 InboundHTLCState::Committed => {},
2431 InboundHTLCState::LocalRemoved(ref reason) => {
2432 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2433 } else {
2434 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2435 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2437 return UpdateFulfillFetch::DuplicateClaim {};
2440 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2441 // Don't return in release mode here so that we can update channel_monitor
2444 pending_idx = idx;
2445 htlc_value_msat = htlc.amount_msat;
2446 break;
2449 if pending_idx == core::usize::MAX {
2450 #[cfg(any(test, fuzzing))]
2451 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2452 // this is simply a duplicate claim, not previously failed and we lost funds.
2453 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2454 return UpdateFulfillFetch::DuplicateClaim {};
2457 // Now update local state:
2459 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2460 // can claim it even if the channel hits the chain before we see their next commitment.
2461 self.context.latest_monitor_update_id += 1;
2462 let monitor_update = ChannelMonitorUpdate {
2463 update_id: self.context.latest_monitor_update_id,
2464 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2465 payment_preimage: payment_preimage_arg.clone(),
2469 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2470 // Note that this condition is the same as the assertion in
2471 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2472 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2473 // do not get into this branch.
2474 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2475 match pending_update {
2476 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2477 if htlc_id_arg == htlc_id {
2478 // Make sure we don't leave latest_monitor_update_id incremented here:
2479 self.context.latest_monitor_update_id -= 1;
2480 #[cfg(any(test, fuzzing))]
2481 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2482 return UpdateFulfillFetch::DuplicateClaim {};
2485 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2486 if htlc_id_arg == htlc_id {
2487 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2488 // TODO: We may actually be able to switch to a fulfill here, though it's
2489 // rare enough that it may not be worth the complexity burden.
2490 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2491 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2497 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2498 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2499 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2501 #[cfg(any(test, fuzzing))]
2502 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2503 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2505 #[cfg(any(test, fuzzing))]
2506 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2509 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2510 if let InboundHTLCState::Committed = htlc.state {
2512 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2513 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2515 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2516 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2519 UpdateFulfillFetch::NewClaim {
2522 msg: Some(msgs::UpdateFulfillHTLC {
2523 channel_id: self.context.channel_id(),
2524 htlc_id: htlc_id_arg,
2525 payment_preimage: payment_preimage_arg,
2530 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2531 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2532 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2533 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2534 // Even if we aren't supposed to let new monitor updates with commitment state
2535 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2536 // matter what. Sadly, to push a new monitor update which flies before others
2537 // already queued, we have to insert it into the pending queue and update the
2538 // update_ids of all the following monitors.
2539 if release_cs_monitor && msg.is_some() {
2540 let mut additional_update = self.build_commitment_no_status_check(logger);
2541 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
2542 // to be strictly increasing by one, so decrement it here.
2543 self.context.latest_monitor_update_id = monitor_update.update_id;
2544 monitor_update.updates.append(&mut additional_update.updates);
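// As a worked example (hypothetical ids): if `blocked_monitor_updates` holds updates
// with ids [5, 6] and the preimage update above was assigned id 7, we renumber the
// preimage update to 5 below and bump the blocked updates to [6, 7], so update_ids
// remain strictly increasing in the order the updates will actually be applied.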
2546 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2547 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2548 monitor_update.update_id = new_mon_id;
2549 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2550 held_update.update.update_id += 1;
2553 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2554 let update = self.build_commitment_no_status_check(logger);
2555 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2561 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2562 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2564 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2568 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2569 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2570 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2571 /// before we fail backwards.
2573 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2574 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2575 /// [`ChannelError::Ignore`].
2576 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2577 -> Result<(), ChannelError> where L::Target: Logger {
2578 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2579 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2582 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2583 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2584 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2585 /// before we fail backwards.
2587 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2588 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2589 /// [`ChannelError::Ignore`].
2590 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2591 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2592 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2593 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2595 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2597 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2598 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2599 // these, but for now we just have to treat them as normal.
2601 let mut pending_idx = core::usize::MAX;
2602 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2603 if htlc.htlc_id == htlc_id_arg {
2605 InboundHTLCState::Committed => {},
2606 InboundHTLCState::LocalRemoved(ref reason) => {
2607 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2609 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2614 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2615 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2621 if pending_idx == core::usize::MAX {
2622 #[cfg(any(test, fuzzing))]
2623 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2624 // is simply a duplicate fail, not previously failed and we failed-back too early.
2625 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2629 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2630 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2631 force_holding_cell = true;
2634 // Now update local state:
2635 if force_holding_cell {
2636 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2637 match pending_update {
2638 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2639 if htlc_id_arg == htlc_id {
2640 #[cfg(any(test, fuzzing))]
2641 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2645 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2646 if htlc_id_arg == htlc_id {
2647 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2648 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2654 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2655 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2656 htlc_id: htlc_id_arg,
2662 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2664 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2665 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2668 Ok(Some(msgs::UpdateFailHTLC {
2669 channel_id: self.context.channel_id(),
2670 htlc_id: htlc_id_arg,
2675 // Message handlers:
2677 /// Handles a funding_signed message from the remote end.
2678 /// If this call is successful, broadcast the funding transaction (and not before!)
2679 pub fn funding_signed<L: Deref>(
2680 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2681 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2685 if !self.context.is_outbound() {
2686 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2688 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2689 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2691 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2692 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2693 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2694 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2697 let funding_script = self.context.get_funding_redeemscript();
2699 let counterparty_keys = self.context.build_remote_transaction_keys();
2700 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2701 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2702 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2704 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2705 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2707 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2708 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2710 let trusted_tx = initial_commitment_tx.trust();
2711 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2712 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2713 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2714 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2715 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2719 let holder_commitment_tx = HolderCommitmentTransaction::new(
2720 initial_commitment_tx,
2723 &self.context.get_holder_pubkeys().funding_pubkey,
2724 self.context.counterparty_funding_pubkey()
2727 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2728 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2731 let funding_redeemscript = self.context.get_funding_redeemscript();
2732 let funding_txo = self.context.get_funding_txo().unwrap();
2733 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2734 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
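// Per BOLT 3, this factor is XORed with the commitment number and the result is
// spread across the commitment transaction's locktime and input sequence, hiding the
// update count from chain observers. A rough sketch of how it is consumed (variable
// names here are illustrative, not from this file):
//
//     let obscured = commitment_number ^ obscure_factor;
//     let locktime = (0x20u32 << 24) | (obscured & 0x00ff_ffff) as u32;         // low 24 bits
//     let sequence = (0x80u32 << 24) | ((obscured >> 24) & 0x00ff_ffff) as u32; // high 24 bits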
2735 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2736 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2737 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2738 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2739 shutdown_script, self.context.get_holder_selected_contest_delay(),
2740 &self.context.destination_script, (funding_txo, funding_txo_script),
2741 &self.context.channel_transaction_parameters,
2742 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2744 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2746 channel_monitor.provide_initial_counterparty_commitment_tx(
2747 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2748 self.context.cur_counterparty_commitment_transaction_number,
2749 self.context.counterparty_cur_commitment_point.unwrap(),
2750 counterparty_initial_commitment_tx.feerate_per_kw(),
2751 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2752 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
2754 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
2755 if self.context.is_batch_funding() {
2756 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2758 self.context.channel_state = ChannelState::FundingSent as u32;
2760 self.context.cur_holder_commitment_transaction_number -= 1;
2761 self.context.cur_counterparty_commitment_transaction_number -= 1;
2763 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2765 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2766 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2770 /// Updates the state of the channel to indicate that all channels in the batch have received
2771 /// funding_signed and persisted their monitors.
2772 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2773 /// treated as a non-batch channel going forward.
2774 pub fn set_batch_ready(&mut self) {
2775 self.context.is_batch_funding = None;
2776 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2779 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2780 /// and the channel is now usable (and public), this may generate an announcement_signatures to send.
2782 pub fn channel_ready<NS: Deref, L: Deref>(
2783 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2784 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2785 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2787 NS::Target: NodeSigner,
2790 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2791 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2792 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2795 if let Some(scid_alias) = msg.short_channel_id_alias {
2796 if Some(scid_alias) != self.context.short_channel_id {
2797 // The scid alias provided can be used to route payments *from* our counterparty,
2798 // i.e. can be used for inbound payments and provided in invoices, but is not used
2799 // when routing outbound payments.
2800 self.context.latest_inbound_scid_alias = Some(scid_alias);
2804 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2806 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2807 // batch, but we can receive channel_ready messages.
2809 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2810 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
2812 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2813 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2814 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2815 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2816 self.context.update_time_counter += 1;
2817 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2818 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2819 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2820 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2822 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2823 // required, or they're sending a fresh SCID alias.
2824 let expected_point =
2825 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2826 // If they haven't ever sent an updated point, the point they send should match the current one.
2828 self.context.counterparty_cur_commitment_point
2829 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2830 // If we've advanced the commitment number once, the second commitment point is
2831 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2832 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2833 self.context.counterparty_prev_commitment_point
2835 // If they have sent updated points, channel_ready is always supposed to match
2836 // their "first" point, which we re-derive here.
2837 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2838 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2839 ).expect("We already advanced, so previous secret keys should have been validated already")))
2841 if expected_point != Some(msg.next_per_commitment_point) {
2842 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2846 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2849 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2850 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2852 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2854 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
2857 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2858 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2859 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2860 ) -> Result<(), ChannelError>
2861 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2862 FE::Target: FeeEstimator, L::Target: Logger,
2864 // We can't accept HTLCs sent after we've sent a shutdown.
2865 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2866 if local_sent_shutdown {
2867 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2869 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2870 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2871 if remote_sent_shutdown {
2872 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2874 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2875 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2877 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2878 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2880 if msg.amount_msat == 0 {
2881 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2883 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2884 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2887 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2888 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2889 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2890 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2892 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2893 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2896 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2897 // the reserve_satoshis we told them to always have as direct payment so that they lose
2898 // something if we punish them for broadcasting an old state).
2899 // Note that we don't really care about having a small/no to_remote output in our local
2900 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2901 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2902 // present in the next commitment transaction we send them (at least for fulfilled ones,
2903 // failed ones won't modify value_to_self).
2904 // Note that we will send HTLCs which another instance of rust-lightning would think
2905 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2906 // Channel state once they will not be present in the next received commitment transaction).
2908 let mut removed_outbound_total_msat = 0;
2909 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2910 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2911 removed_outbound_total_msat += htlc.amount_msat;
2912 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2913 removed_outbound_total_msat += htlc.amount_msat;
2917 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2918 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2921 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2922 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2923 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
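// As a rough worked example (hypothetical feerate; the non-anchor BOLT 3 weights are
// 663 WU for HTLC-timeout and 703 WU for HTLC-success): at a dust buffer feerate of
// 5,000 sat/kWU these terms come to 3,315 and 3,515 sats, so an HTLC below 3,315 sats
// plus the counterparty's dust limit counts toward our counterparty-tx dust exposure.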
2925 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2926 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2927 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2928 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2929 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2930 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2931 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2935 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2936 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2937 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2938 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2939 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2940 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2941 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2945 let pending_value_to_self_msat =
2946 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2947 let pending_remote_value_msat =
2948 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
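// E.g. (hypothetical numbers): on a 1,000,000 sat channel (1,000,000,000 msat) with
// value_to_self_msat of 600,000,000 and 50,000,000 msat of pending inbound HTLCs,
// pending_value_to_self_msat is 650,000,000 and the remote has at most 350,000,000
// msat left to offer before overdrawing.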
2949 if pending_remote_value_msat < msg.amount_msat {
2950 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2953 // Check that the remote can afford to pay for this HTLC on-chain at the current
2954 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2956 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2957 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2958 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
2960 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2961 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2965 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2966 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2968 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2969 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
2973 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2974 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2978 if !self.context.is_outbound() {
2979 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
2980 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
2981 // side, only on the sender's. Note that with anchor outputs we are no longer as
2982 // sensitive to fee spikes, so we don't apply the extra fee spike buffer multiple below.
2983 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2984 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2985 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2986 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
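// (Historically FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE has been 2, i.e. we budget for
// the commitment feerate doubling before this HTLC is irrevocably committed; consult
// the constant's definition for the authoritative value.)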
2988 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
2989 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2990 // the HTLC, i.e. its status is already set to failing.
2991 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
2992 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2995 // Check that they won't violate our local required channel reserve by adding this HTLC.
2996 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2997 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2998 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
2999 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3002 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3003 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3005 if msg.cltv_expiry >= 500000000 {
3006 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3009 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3010 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3011 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3015 // Now update local state:
3016 self.context.next_counterparty_htlc_id += 1;
3017 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3018 htlc_id: msg.htlc_id,
3019 amount_msat: msg.amount_msat,
3020 payment_hash: msg.payment_hash,
3021 cltv_expiry: msg.cltv_expiry,
3022 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3027 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3029 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3030 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3031 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3032 if htlc.htlc_id == htlc_id {
3033 let outcome = match check_preimage {
3034 None => fail_reason.into(),
3035 Some(payment_preimage) => {
3036 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3037 if payment_hash != htlc.payment_hash {
3038 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3040 OutboundHTLCOutcome::Success(Some(payment_preimage))
3044 OutboundHTLCState::LocalAnnounced(_) =>
3045 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3046 OutboundHTLCState::Committed => {
3047 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3049 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3050 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3055 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3058 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3059 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3060 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3062 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3063 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3066 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3069 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3070 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3071 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3073 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3074 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3077 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3081 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3082 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3083 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3085 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3086 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3089 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3093 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3094 where L::Target: Logger
3096 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3097 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3099 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3100 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3102 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3103 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3106 let funding_script = self.context.get_funding_redeemscript();
3108 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3110 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3111 let commitment_txid = {
3112 let trusted_tx = commitment_stats.tx.trust();
3113 let bitcoin_tx = trusted_tx.built_transaction();
3114 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3116 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3117 log_bytes!(msg.signature.serialize_compact()[..]),
3118 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3119 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3120 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3121 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3125 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3127 // If our counterparty updated the channel fee in this commitment transaction, check that
3128 // they can actually afford the new fee now.
3129 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3130 update_state == FeeUpdateState::RemoteAnnounced
3133 debug_assert!(!self.context.is_outbound());
3134 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3135 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3136 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3139 #[cfg(any(test, fuzzing))]
3141 if self.context.is_outbound() {
3142 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3143 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3144 if let Some(info) = projected_commit_tx_info {
3145 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3146 + self.context.holding_cell_htlc_updates.len();
3147 if info.total_pending_htlcs == total_pending_htlcs
3148 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3149 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3150 && info.feerate == self.context.feerate_per_kw {
3151 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3157 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3158 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3161 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3162 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3163 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3164 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3165 // backwards compatibility, we never use it in production. To provide test coverage, here,
3166 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3167 #[allow(unused_assignments, unused_mut)]
3168 let mut separate_nondust_htlc_sources = false;
3169 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3170 use core::hash::{BuildHasher, Hasher};
3171 // Get a random value using the only std API to do so - the DefaultHasher
3172 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3173 separate_nondust_htlc_sources = rand_val % 2 == 0;
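// (`RandomState` seeds every hasher it builds from process-global randomness, so
// hashing nothing and taking the parity of `finish()` is a cheap coin flip without
// adding an RNG dependency to test builds.)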
3176 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3177 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3178 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3179 if let Some(_) = htlc.transaction_output_index {
3180 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3181 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3182 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3184 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3185 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
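// (With anchor outputs, HTLC signatures use SIGHASH_SINGLE | SIGHASH_ANYONECANPAY per
// BOLT 3 so the broadcaster can later attach inputs/outputs to fee-bump the HTLC
// transaction; without anchors the whole transaction is committed to with SIGHASH_ALL.)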
3186 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3187 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3188 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3189 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3190 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3191 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3193 if !separate_nondust_htlc_sources {
3194 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3197 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3199 if separate_nondust_htlc_sources {
3200 if let Some(source) = source_opt.take() {
3201 nondust_htlc_sources.push(source);
3204 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3207 let holder_commitment_tx = HolderCommitmentTransaction::new(
3208 commitment_stats.tx,
3210 msg.htlc_signatures.clone(),
3211 &self.context.get_holder_pubkeys().funding_pubkey,
3212 self.context.counterparty_funding_pubkey()
3215 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3216 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3218 // Update state now that we've passed all the can-fail calls...
3219 let mut need_commitment = false;
3220 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3221 if *update_state == FeeUpdateState::RemoteAnnounced {
3222 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3223 need_commitment = true;
3227 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3228 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3229 Some(forward_info.clone())
3231 if let Some(forward_info) = new_forward {
3232 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3233 &htlc.payment_hash, &self.context.channel_id);
3234 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3235 need_commitment = true;
3238 let mut claimed_htlcs = Vec::new();
3239 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3240 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3241 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3242 &htlc.payment_hash, &self.context.channel_id);
3243 // Grab the preimage, if it exists, instead of cloning
3244 let mut reason = OutboundHTLCOutcome::Success(None);
3245 mem::swap(outcome, &mut reason);
3246 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3247 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3248 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3249 // have a `Success(None)` reason. In this case we could forget some HTLC
3250 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3251 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3253 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3255 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3256 need_commitment = true;
3260 self.context.latest_monitor_update_id += 1;
3261 let mut monitor_update = ChannelMonitorUpdate {
3262 update_id: self.context.latest_monitor_update_id,
3263 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3264 commitment_tx: holder_commitment_tx,
3265 htlc_outputs: htlcs_and_sigs,
3267 nondust_htlc_sources,
3271 self.context.cur_holder_commitment_transaction_number -= 1;
3272 self.context.expecting_peer_commitment_signed = false;
3273 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3274 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3275 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3277 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3278 // In case we initially failed monitor updating without requiring a response, we need
3279 // to make sure the RAA gets sent first.
3280 self.context.monitor_pending_revoke_and_ack = true;
3281 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3282 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3283 // the corresponding HTLC status updates so that
3284 // get_last_commitment_update_for_send includes the right HTLCs.
3285 self.context.monitor_pending_commitment_signed = true;
3286 let mut additional_update = self.build_commitment_no_status_check(logger);
3287 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3288 // strictly increasing by one, so decrement it here.
3289 self.context.latest_monitor_update_id = monitor_update.update_id;
3290 monitor_update.updates.append(&mut additional_update.updates);
3292 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3293 &self.context.channel_id);
3294 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3297 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3298 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3299 // we'll send one right away when we get the revoke_and_ack when we
3300 // free_holding_cell_htlcs().
3301 let mut additional_update = self.build_commitment_no_status_check(logger);
3302 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3303 // strictly increasing by one, so decrement it here.
3304 self.context.latest_monitor_update_id = monitor_update.update_id;
3305 monitor_update.updates.append(&mut additional_update.updates);
3309 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3310 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3311 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3312 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3315 /// Public version of the below, checking relevant preconditions first.
3316 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3317 /// returns `(None, Vec::new())`.
3318 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3319 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3320 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3321 where F::Target: FeeEstimator, L::Target: Logger
3323 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3324 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3325 self.free_holding_cell_htlcs(fee_estimator, logger)
3326 } else { (None, Vec::new()) }
3329 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3330 /// for our counterparty.
3331 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3332 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3333 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3334 where F::Target: FeeEstimator, L::Target: Logger
3336 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3337 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3338 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3339 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3341 let mut monitor_update = ChannelMonitorUpdate {
3342 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3343 updates: Vec::new(),
3346 let mut htlc_updates = Vec::new();
3347 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3348 let mut update_add_count = 0;
3349 let mut update_fulfill_count = 0;
3350 let mut update_fail_count = 0;
3351 let mut htlcs_to_fail = Vec::new();
3352 for htlc_update in htlc_updates.drain(..) {
3353 // Note that this *can* fail, though it should be due to rather-rare conditions on
3354 // fee races with adding too many outputs which push our total payments just over
3355 // the limit. In case it's less rare than I anticipate, we may want to revisit
3356 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3357 // to rebalance channels.
3358 match &htlc_update {
3359 &HTLCUpdateAwaitingACK::AddHTLC {
3360 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3361 skimmed_fee_msat, ..
3363 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3364 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3366 Ok(_) => update_add_count += 1,
3369 ChannelError::Ignore(ref msg) => {
3370 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3371 // If we fail to send here, then this HTLC should
3372 // be failed backwards. Failing to send here
3373 // indicates that this HTLC may keep being put back
3374 // into the holding cell without ever being
3375 // successfully forwarded/failed/fulfilled, causing
3376 // our counterparty to eventually close on us.
3377 htlcs_to_fail.push((source.clone(), *payment_hash));
3380 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3386 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3387 // If an HTLC claim was previously added to the holding cell (via
3388 // `get_update_fulfill_htlc`), then generating the claim message itself must
3389 // not fail - any in between attempts to claim the HTLC will have resulted
3390 // in it hitting the holding cell again and we cannot change the state of a
3391 // holding cell HTLC from fulfill to anything else.
3392 let mut additional_monitor_update =
3393 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3394 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3395 { monitor_update } else { unreachable!() };
3396 update_fulfill_count += 1;
3397 monitor_update.updates.append(&mut additional_monitor_update.updates);
3399 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3400 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3401 Ok(update_fail_msg_option) => {
3402 // If an HTLC failure was previously added to the holding cell (via
3403 // `queue_fail_htlc`) then generating the fail message itself must
3404 // not fail - we should never end up in a state where we double-fail
3405 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3406 // for a full revocation before failing.
3407 debug_assert!(update_fail_msg_option.is_some());
3408 update_fail_count += 1;
3411 if let ChannelError::Ignore(_) = e {}
3413 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3420 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3421 return (None, htlcs_to_fail);
3423 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3424 self.send_update_fee(feerate, false, fee_estimator, logger)
3429 let mut additional_update = self.build_commitment_no_status_check(logger);
3430 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3431 // but we want them to be strictly increasing by one, so reset it here.
3432 self.context.latest_monitor_update_id = monitor_update.update_id;
3433 monitor_update.updates.append(&mut additional_update.updates);
3435 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3436 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3437 update_add_count, update_fulfill_count, update_fail_count);
3439 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3440 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3446 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3447 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3448 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3449 /// generating an appropriate error *after* the channel state has been updated based on the
3450 /// revoke_and_ack message.
3451 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3452 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3453 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3454 where F::Target: FeeEstimator, L::Target: Logger,
3456 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3457 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3459 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3460 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3462 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3463 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3466 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3468 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3469 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3470 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3474 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3475 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3476 // haven't given them a new commitment transaction to broadcast). We should probably
3477 // take advantage of this by updating our channel monitor, sending them an error, and
3478 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3479 // lot of work, and there's some chance this is all a misunderstanding anyway.
3480 // We have to do *something*, though, since our signer may get mad at us for otherwise
3481 // jumping a remote commitment number, so best to just force-close and move on.
3482 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		#[cfg(any(test, fuzzing))]
		{
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};
		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.)
		self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}
		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
			let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;

			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					*expecting_peer_commitment_signed = true;
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							},
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
					*expecting_peer_commitment_signed = true;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
					self.context.expecting_peer_commitment_signed = true;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
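		// The macro below either hands the ChannelMonitorUpdate back to the caller for immediate
		// release or, if other updates are already blocked (or the caller requested a hold),
		// queues it in `blocked_monitor_updates`, so that updates reach the ChannelManager in the
		// order they were generated.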
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call
				// get_last_commitment_update_for_send(), which does not update state, but we're
				// definitely now awaiting a remote revoke before we can step forward any more, so
				// set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			},
		}
	}

	/// Queues up an outbound update fee by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
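	///
	/// A minimal usage sketch (names like `chan`, `new_feerate_per_kw`, `fee_estimator` and
	/// `logger` are illustrative, not part of this API):
	///
	/// ```ignore
	/// chan.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
	/// // Later, generate and send the update_fee + commitment_signed pair:
	/// let _ = chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
	/// ```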
	pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
		assert!(msg_opt.is_none(), "We forced holding cell?");
	}

	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on the optionality of the return value.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
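		// Budget for CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional non-dust HTLCs on top of the
		// committed and holding-cell ones, so that HTLCs our counterparty adds concurrently with
		// this update_fee can't leave us unable to afford the new commitment transaction's fee.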
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}

	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			return Err(());
		}

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}

	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
	/// update completes (potentially immediately).
	/// The messages which were generated with the monitor update must *not* have been sent to the
	/// remote end, and must instead have been dropped. They will be regenerated when
	/// [`Self::monitor_updating_restored`] is called.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	/// [`chain::Watch`]: crate::chain::Watch
	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
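	///
	/// A rough sketch of the intended pause/restore cycle (illustrative only):
	///
	/// ```ignore
	/// self.monitor_updating_paused(true, false, false, Vec::new(), Vec::new(), Vec::new());
	/// // ... the ChannelMonitorUpdate is persisted via the ChannelManager / chain::Watch ...
	/// // Once persistence completes, the held messages are regenerated:
	/// let updates = self.monitor_updating_restored(&logger, &node_signer, chain_hash, &user_config, best_block_height);
	/// ```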
	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.context.monitor_pending_revoke_and_ack |= resend_raa;
		self.context.monitor_pending_commitment_signed |= resend_commitment;
		self.context.monitor_pending_channel_ready |= resend_channel_ready;
		self.context.monitor_pending_forwards.append(&mut pending_forwards);
		self.context.monitor_pending_failures.append(&mut pending_fails);
		self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
	}

	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
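	///
	/// Callers should forward any returned messages respecting the returned `order` field; a
	/// sketch (illustrative only, field handling elided):
	///
	/// ```ignore
	/// let updates = chan.monitor_updating_restored(&logger, &node_signer, chain_hash, &user_config, height);
	/// match updates.order {
	/// 	RAACommitmentOrder::RevokeAndACKFirst => { /* send updates.raa, then updates.commitment_update */ },
	/// 	RAACommitmentOrder::CommitmentFirst => { /* send updates.commitment_update, then updates.raa */ },
	/// }
	/// ```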
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}
		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}

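	/// Handles an inbound update_fee message from our counterparty (only the channel funder may
	/// send one), recording the new feerate as pending until the accompanying commitment dance
	/// completes and checking that it doesn't push us over our dust exposure limit.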
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// Check that we won't be pushed over our dust exposure limit by the feerate increase.
		if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}

	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	#[allow(unused)]
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
		let commitment_update = if self.context.signer_pending_commitment_update {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
			self.context.get_funding_signed_msg(logger).1
		} else { None };
		let channel_ready = if funding_signed.is_some() {
			self.check_get_channel_ready(0)
		} else { None };
		let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
			self.context.get_funding_created_msg(logger)
		} else { None };

		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
			if commitment_update.is_some() { "a" } else { "no" },
			if funding_signed.is_some() { "a" } else { "no" },
			if funding_created.is_some() { "a" } else { "no" },
			if channel_ready.is_some() { "a" } else { "no" });

		SignerResumeUpdates {
			commitment_update,
			funding_signed,
			funding_created,
			channel_ready,
		}
	}

	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
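		// Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER, so the secret
		// released below (at `+ 2`) is for the state two steps older than the next one we'll
		// sign, i.e. the state being revoked, while the point above is for the upcoming state.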
		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
		msgs::RevokeAndACK {
			channel_id: self.context.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}

	/// Gets the last commitment update for immediate sending to our peer.
	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();

		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
					skimmed_fee_msat: htlc.skimmed_fee_msat,
					blinding_point: None,
				});
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.context.channel_id(),
				feerate_per_kw: self.context.pending_update_fee.unwrap().0,
			})
		} else { None };

		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
			&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
			if self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = false;
			}
			update
		} else {
			if !self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = true;
			}
			return Err(());
		};
		Ok(msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed,
		})
	}

	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}

	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
	pub fn channel_reestablish<L: Deref, NS: Deref>(
		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
	) -> Result<ReestablishResponses, ChannelError>
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
		}

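		// The `next_*_commitment_number` fields in channel_reestablish count *up* from zero, while
		// our internal commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER,
		// so we convert ours into the counting-up form before comparing.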
		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
		if msg.next_remote_commitment_number > 0 {
			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
			}
			if msg.next_remote_commitment_number > our_commitment_transaction {
				macro_rules! log_and_panic {
					($err_msg: expr) => {
						log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
						panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					}
				}
				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number, if yes we send a warning message.
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(ChannelError::Warn(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);

		if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
					self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.context.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				}),
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
				self.context.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			debug_assert!(false, "All values should have been handled in the four cases above");
			return Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
		if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
			self.mark_awaiting_response();
		}
		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };

		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
			}

			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: None,
				order: self.context.resend_order.clone(),
			})
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
			}

			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
				self.context.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None,
					order: self.context.resend_order.clone(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
					order: self.context.resend_order.clone(),
				})
			}
		} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		} else {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		}
	}

	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
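	///
	/// As a rough worked example (illustrative numbers only): at a proposed feerate of 2_500 sat
	/// per 1000 weight and an estimated closing transaction weight of 700 WU, the minimum fee is
	/// `2_500 * 700 / 1000 = 1_750` sats; the maximum is computed the same way at the higher
	/// feerate, plus `force_close_avoidance_max_fee_satoshis` when we're the funder.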
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// a very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
			// We always add force_close_avoidance_max_fee_satoshis to our normal
			// feerate-calculated fee, but allow the max to be overridden if we're using a
			// target feerate-calculated fee.
			cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
				proposed_max_feerate as u64 * tx_weight / 1000)
		} else {
			self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
		};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}

	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.context.closing_negotiation_ready()
	}

	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.context.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.context.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}

	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
		// that closing_negotiation_ready checks this case (as well as a few others).
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

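		// Only the channel funder kicks off closing_signed negotiation; as the fundee we just
		// respond to a closing_signed we previously stashed while a monitor update was in flight.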
		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
		if self.context.expecting_peer_commitment_signed {
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}

	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// a reconnection.
	fn mark_awaiting_response(&mut self) {
		self.context.sent_message_awaiting_response = Some(0);
	}

	/// Determines whether we should disconnect the counterparty due to not receiving a response
	/// within our expected timeframe.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
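	///
	/// For example (illustrative only), from a timer handler:
	///
	/// ```ignore
	/// if chan.should_disconnect_peer_awaiting_response() {
	/// 	// Disconnect the peer; the channel_reestablish dance on reconnect will resync us.
	/// }
	/// ```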
	pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
		let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
			ticks_elapsed
		} else {
			// Don't disconnect when we're not waiting on a response.
			return false;
		};
		*ticks_elapsed += 1;
		*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	}

	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}

	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
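		// The witness signatures must appear in the same order as the pubkeys in the 2-of-2
		// funding redeemscript, which sorts the funding pubkeys lexicographically (see BOLT 3).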
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}

	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

4647 let funding_redeemscript = self.context.get_funding_redeemscript();
4648 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4649 if used_total_fee != msg.fee_satoshis {
4650 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4652 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4654 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4657 // The remote end may have decided to revoke their output due to inconsistent dust
4658 // limits, so check for that case by re-checking the signature here.
4659 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4660 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4661 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4665 for outp in closing_tx.trust().built_transaction().output.iter() {
4666 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4667 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4671 assert!(self.context.shutdown_scriptpubkey.is_some());
4672 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4673 if last_fee == msg.fee_satoshis {
4674 let shutdown_result = ShutdownResult {
4675 monitor_update: None,
4676 dropped_outbound_htlcs: Vec::new(),
4677 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4679 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4680 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4681 self.context.update_time_counter += 1;
4682 return Ok((None, Some(tx), Some(shutdown_result)));
4686 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4688 macro_rules! propose_fee {
4689 ($new_fee: expr) => {
4690 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4691 (closing_tx, $new_fee)
4693 self.build_closing_transaction($new_fee, false)
4696 return match &self.context.holder_signer {
4697 ChannelSignerType::Ecdsa(ecdsa) => {
4699 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4700 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4701 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4702 let shutdown_result = ShutdownResult {
4703 monitor_update: None,
4704 dropped_outbound_htlcs: Vec::new(),
4705 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4707 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4708 self.context.update_time_counter += 1;
4709 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4710 (Some(tx), Some(shutdown_result))
4715 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4716 Ok((Some(msgs::ClosingSigned {
4717 channel_id: self.context.channel_id,
4718 fee_satoshis: used_fee,
4720 fee_range: Some(msgs::ClosingSignedFeeRange {
4721 min_fee_satoshis: our_min_fee,
4722 max_fee_satoshis: our_max_fee,
4724 }), signed_tx, shutdown_result))
4726 // TODO (taproot|arik)
4733 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4734 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4735 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4737 if max_fee_satoshis < our_min_fee {
4738 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4740 if min_fee_satoshis > our_max_fee {
4741 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4744 if !self.context.is_outbound() {
4745 // They have to pay, so pick the highest fee in the overlapping range.
4746 // We should never set an upper bound aside from their full balance
4747 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4748 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4750 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4751 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4752 msg.fee_satoshis, our_min_fee, our_max_fee)));
4754 // The proposed fee is in our acceptable range, accept it and broadcast!
4755 propose_fee!(msg.fee_satoshis);
4758 // Old fee style negotiation. We don't bother to enforce whether they are complying
4759 // with the "making progress" requirements; we just comply and hope for the best.
4760 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4761 if msg.fee_satoshis > last_fee {
4762 if msg.fee_satoshis < our_max_fee {
4763 propose_fee!(msg.fee_satoshis);
4764 } else if last_fee < our_max_fee {
4765 propose_fee!(our_max_fee);
4767 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4770 if msg.fee_satoshis > our_min_fee {
4771 propose_fee!(msg.fee_satoshis);
4772 } else if last_fee > our_min_fee {
4773 propose_fee!(our_min_fee);
4775 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4779 if msg.fee_satoshis < our_min_fee {
4780 propose_fee!(our_min_fee);
4781 } else if msg.fee_satoshis > our_max_fee {
4782 propose_fee!(our_max_fee);
4784 propose_fee!(msg.fee_satoshis);
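// A minimal sketch (hypothetical helper) of the clamping used in the
// old-style branch above, plus the msat-to-sat ceiling used for our upper
// bound: `our_max_fee` can never exceed the channel value minus our own
// balance rounded *up* to whole sats, i.e. `(value_to_self_msat + 999) / 1000`.
//
//     fn old_style_counter_fee(proposal: u64, our_min: u64, our_max: u64) -> u64 {
//         // Accept anything inside our range, otherwise counter with the
//         // nearest bound (mirroring the final if/else chain above).
//         proposal.clamp(our_min, our_max)
//     }
//
//     // e.g. ceiling division: 2_500 msat -> (2_500 + 999) / 1_000 = 3 sat.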
4790 fn internal_htlc_satisfies_config(
4791 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4792 ) -> Result<(), (&'static str, u16)> {
4793 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4794 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4795 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4796 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4798 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4799 0x1000 | 12, // fee_insufficient
4802 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4804 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4805 0x1000 | 13, // incorrect_cltv_expiry
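// A minimal sketch (hypothetical helper) of the fee requirement checked
// above, using the same `ChannelConfig` fields: the inbound HTLC must carry
// at least the base fee plus the proportional fee on the onward amount.
//
//     fn required_fee_msat(amt_to_forward: u64, base_msat: u32, prop_millionths: u32) -> Option<u64> {
//         amt_to_forward.checked_mul(prop_millionths as u64)
//             .map(|prop| prop / 1_000_000)
//             .and_then(|prop| prop.checked_add(base_msat as u64))
//     }
//
//     // e.g. forwarding 1_000_000 msat at base 1_000 msat + 100 ppm requires
//     // 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat of fee.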
4811 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4812 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4813 /// unsuccessful, falls back to the previous one if one exists.
4814 pub fn htlc_satisfies_config(
4815 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4816 ) -> Result<(), (&'static str, u16)> {
4817 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4819 if let Some(prev_config) = self.context.prev_config() {
4820 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4827 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4828 self.context.cur_holder_commitment_transaction_number + 1
4831 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4832 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4835 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4836 self.context.cur_counterparty_commitment_transaction_number + 2
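// A minimal worked example of the offsets above, assuming the usual
// INITIAL_COMMITMENT_NUMBER = 2^48 - 1 convention: the `cur_*` fields track
// the number of the *next* (not-yet-committed) transaction, counting down,
// so the latest committed transaction is one higher and the latest *revoked*
// counterparty transaction is two higher.
//
//     const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
//     let cur = INITIAL_COMMITMENT_NUMBER - 2; // two commitments exchanged
//     let latest_committed = cur + 1;
//     let latest_revoked = cur + 2; // == INITIAL_COMMITMENT_NUMBER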
4840 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4841 &self.context.holder_signer
4845 pub fn get_value_stat(&self) -> ChannelValueStat {
4847 value_to_self_msat: self.context.value_to_self_msat,
4848 channel_value_msat: self.context.channel_value_satoshis * 1000,
4849 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4850 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4851 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4852 holding_cell_outbound_amount_msat: {
4854 for h in self.context.holding_cell_htlc_updates.iter() {
4856 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4864 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4865 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4869 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4870 /// Allowed in any state (including after shutdown)
4871 pub fn is_awaiting_monitor_update(&self) -> bool {
4872 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4875 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4876 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4877 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4878 self.context.blocked_monitor_updates[0].update.update_id - 1
4881 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4882 /// further blocked monitor update exists after the next.
4883 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4884 if self.context.blocked_monitor_updates.is_empty() { return None; }
4885 Some((self.context.blocked_monitor_updates.remove(0).update,
4886 !self.context.blocked_monitor_updates.is_empty()))
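// A minimal sketch (hypothetical standalone type) of the queue discipline
// above: blocked updates release strictly in order, so the latest unblocked
// update id is one less than the first blocked update's id.
//
//     struct UpdateQueue { blocked_ids: Vec<u64>, latest_id: u64 }
//     impl UpdateQueue {
//         fn latest_unblocked(&self) -> u64 {
//             self.blocked_ids.first().map(|id| id - 1).unwrap_or(self.latest_id)
//         }
//         fn unblock_next(&mut self) -> Option<(u64, bool)> {
//             if self.blocked_ids.is_empty() { return None; }
//             Some((self.blocked_ids.remove(0), !self.blocked_ids.is_empty()))
//         }
//     }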
4889 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4890 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4891 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4892 -> Option<ChannelMonitorUpdate> {
4893 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4894 if !release_monitor {
4895 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4904 pub fn blocked_monitor_updates_pending(&self) -> usize {
4905 self.context.blocked_monitor_updates.len()
4908 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4909 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4910 /// transaction. If the channel is inbound, this implies simply that the channel has not
4912 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4913 if !self.is_awaiting_monitor_update() { return false; }
4914 if self.context.channel_state &
4915 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4916 == ChannelState::FundingSent as u32 {
4917 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4918 // FundingSent set, though our peer could have sent their channel_ready.
4919 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4922 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4923 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4924 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4925 // waiting for the initial monitor persistence. Thus, we check if our commitment
4926 // transaction numbers have both been iterated only exactly once (for the
4927 // funding_signed), and we're awaiting monitor update.
4929 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4930 // only way to get an awaiting-monitor-update state during initial funding is if the
4931 // initial monitor persistence is still pending).
4933 // Because deciding we're awaiting initial broadcast spuriously could result in
4934 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4935 // we hard-assert here, even in production builds.
4936 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4937 assert!(self.context.monitor_pending_channel_ready);
4938 assert_eq!(self.context.latest_monitor_update_id, 0);
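// A minimal worked example of the 0-conf check above, assuming the usual
// INITIAL_COMMITMENT_NUMBER = 2^48 - 1 convention: funding_signed decrements
// each side's commitment number exactly once, so both numbers sitting at
// INITIAL_COMMITMENT_NUMBER - 1 while a monitor update is pending can only
// mean the *initial* monitor persistence is still in flight.
//
//     const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
//     let after_funding_signed = INITIAL_COMMITMENT_NUMBER - 1; // both sides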
4944 /// Returns true if our channel_ready has been sent
4945 pub fn is_our_channel_ready(&self) -> bool {
4946 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4949 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4950 pub fn received_shutdown(&self) -> bool {
4951 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4954 /// Returns true if we either initiated or agreed to shut down the channel.
4955 pub fn sent_shutdown(&self) -> bool {
4956 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4959 /// Returns true if this channel is fully shut down. True here implies that no further actions
4960 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4961 /// will be handled appropriately by the chain monitor.
4962 pub fn is_shutdown(&self) -> bool {
4963 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
4964 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4969 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4970 self.context.channel_update_status
4973 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4974 self.context.update_time_counter += 1;
4975 self.context.channel_update_status = status;
4978 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
4980 // * always when a new block/transactions are confirmed with the new height
4981 // * when funding is signed with a height of 0
4982 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
4986 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4987 if funding_tx_confirmations <= 0 {
4988 self.context.funding_tx_confirmation_height = 0;
4991 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
4995 // If we're still pending the signature on a funding transaction, then we're not ready to send a
4996 // channel_ready yet.
4997 if self.context.signer_pending_funding {
5001 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5002 // channel_ready until the entire batch is ready.
5003 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5004 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5005 self.context.channel_state |= ChannelState::OurChannelReady as u32;
5007 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5008 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5009 self.context.update_time_counter += 1;
5011 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5012 // We got a reorg but not enough to trigger a force close, just ignore.
5015 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
5016 // We should never see a funding transaction on-chain until we've received
5017 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5018 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5019 // however, may do this and we shouldn't treat it as a bug.
5020 #[cfg(not(fuzzing))]
5021 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5022 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5023 self.context.channel_state);
5025 // We got a reorg but not enough to trigger a force close, just ignore.
5029 if need_commitment_update {
5030 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5031 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5032 let next_per_commitment_point =
5033 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5034 return Some(msgs::ChannelReady {
5035 channel_id: self.context.channel_id,
5036 next_per_commitment_point,
5037 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5041 self.context.monitor_pending_channel_ready = true;
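// A minimal worked example of the confirmation arithmetic above: a funding
// transaction confirmed at height H has `height - H + 1` confirmations at
// chain height `height` (one in its own block), so `confs <= 0` means a
// reorg removed it and `confs < minimum_depth` means we keep waiting.
//
//     let (conf_height, height) = (100_000i64, 100_002i64);
//     let confs = height - conf_height + 1; // = 3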
5047 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5048 /// In the first case, we store the confirmation height and calculate the short channel id.
5049 /// In the second, we simply return an Err indicating we need to be force-closed now.
5050 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5051 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5052 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5053 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5055 NS::Target: NodeSigner,
5058 let mut msgs = (None, None);
5059 if let Some(funding_txo) = self.context.get_funding_txo() {
5060 for &(index_in_block, tx) in txdata.iter() {
5061 // Check if the transaction is the expected funding transaction, and if it is,
5062 // check that it pays the right amount to the right script.
5063 if self.context.funding_tx_confirmation_height == 0 {
5064 if tx.txid() == funding_txo.txid {
5065 let txo_idx = funding_txo.index as usize;
5066 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5067 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5068 if self.context.is_outbound() {
5069 // If we generated the funding transaction and it doesn't match what it
5070 // should, the client is really broken and we should just panic and
5071 // tell them off. That said, because hash collisions happen with high
5072 // probability in fuzzing mode, if we're fuzzing we just close the
5073 // channel and move on.
5074 #[cfg(not(fuzzing))]
5075 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5077 self.context.update_time_counter += 1;
5078 let err_reason = "funding tx had wrong script/value or output index";
5079 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5081 if self.context.is_outbound() {
5082 if !tx.is_coin_base() {
5083 for input in tx.input.iter() {
5084 if input.witness.is_empty() {
5085 // We generated a malleable funding transaction, implying we've
5086 // just exposed ourselves to funds loss to our counterparty.
5087 #[cfg(not(fuzzing))]
5088 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5093 self.context.funding_tx_confirmation_height = height;
5094 self.context.funding_tx_confirmed_in = Some(*block_hash);
5095 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5096 Ok(scid) => Some(scid),
5097 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5100 // If this is a coinbase transaction and not a 0-conf channel,
5101 // we should update our minimum_depth to COINBASE_MATURITY (100) to handle coinbase maturity.
5102 if tx.is_coin_base() &&
5103 self.context.minimum_depth.unwrap_or(0) > 0 &&
5104 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5105 self.context.minimum_depth = Some(COINBASE_MATURITY);
5108 // If we allow 1-conf funding, we may need to check for channel_ready here and
5109 // send it immediately instead of waiting for a best_block_updated call (which
5110 // may have already happened for this block).
5111 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5112 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5113 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5114 msgs = (Some(channel_ready), announcement_sigs);
5117 for inp in tx.input.iter() {
5118 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5119 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5120 return Err(ClosureReason::CommitmentTxConfirmed);
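// A minimal sketch (hypothetical helper) of the 24/24/16-bit packing that
// `scid_from_parts` performs above, matching the bounds in the panic
// message (height and tx index below 2^24, output index below 2^16):
//
//     fn pack_scid(block: u64, tx_index: u64, vout: u64) -> Option<u64> {
//         if block >= (1 << 24) || tx_index >= (1 << 24) || vout > 0xffff {
//             return None;
//         }
//         Some((block << 40) | (tx_index << 16) | vout)
//     }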
5128 /// When a new block is connected, we check the height of the block against outbound holding
5129 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5130 /// else (commitment transaction broadcasts, HTLC transaction broadcasting, etc.) is
5131 /// handled by the ChannelMonitor.
5133 /// If we return Err, the channel may have been closed, at which point the standard
5134 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5137 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5139 pub fn best_block_updated<NS: Deref, L: Deref>(
5140 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5141 node_signer: &NS, user_config: &UserConfig, logger: &L
5142 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5144 NS::Target: NodeSigner,
5147 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5150 fn do_best_block_updated<NS: Deref, L: Deref>(
5151 &mut self, height: u32, highest_header_time: u32,
5152 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5153 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5155 NS::Target: NodeSigner,
5158 let mut timed_out_htlcs = Vec::new();
5159 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5160 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5162 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5163 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5165 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5166 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5167 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5175 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5177 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5178 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5179 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5181 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5182 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5185 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5186 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5187 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5188 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5189 if self.context.funding_tx_confirmation_height == 0 {
5190 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5191 // zero if it has been reorged out, however in either case, our state flags
5192 // indicate we've already sent a channel_ready
5193 funding_tx_confirmations = 0;
5196 // If we've sent channel_ready (or have both sent and received channel_ready), and
5197 // the funding transaction has become unconfirmed,
5198 // close the channel and hope we can get the latest state on chain (because presumably
5199 // the funding transaction is at least still in the mempool of most nodes).
5201 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5202 // 0-conf channel, but not doing so may lead to the
5203 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5205 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5206 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5207 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5208 return Err(ClosureReason::ProcessingError { err: err_reason });
5210 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5211 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5212 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5213 // If funding_tx_confirmed_in is unset, the channel must not be active
5214 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5215 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5216 return Err(ClosureReason::FundingTimedOut);
5219 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5220 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5222 Ok((None, timed_out_htlcs, announcement_sigs))
5225 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5226 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5227 /// before the channel has reached channel_ready and we can just wait for more blocks.
5228 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5229 if self.context.funding_tx_confirmation_height != 0 {
5230 // We handle the funding disconnection by calling best_block_updated with a height one
5231 // below where our funding was connected, implying a reorg back to conf_height - 1.
5232 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5233 // We use the time field to bump the current time we set on channel updates if it's
5234 // larger. If we don't know that time has moved forward, we can just set it to the last
5235 // time we saw and it will be ignored.
5236 let best_time = self.context.update_time_counter;
5237 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5238 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5239 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5240 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5241 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5247 // We never learned about the funding confirmation anyway, just ignore
5252 // Methods to get unprompted messages to send to the remote end (or where we already returned
5253 // something in the handler for the message that prompted this message):
5255 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5256 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5257 /// directions). Should be used for both broadcasted announcements and in response to an
5258 /// AnnouncementSignatures message from the remote peer.
5260 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5263 /// This will only return ChannelError::Ignore upon failure.
5265 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5266 fn get_channel_announcement<NS: Deref>(
5267 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5268 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5269 if !self.context.config.announced_channel {
5270 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5272 if !self.context.is_usable() {
5273 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5276 let short_channel_id = self.context.get_short_channel_id()
5277 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5278 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5279 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5280 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5281 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5283 let msg = msgs::UnsignedChannelAnnouncement {
5284 features: channelmanager::provided_channel_features(&user_config),
5287 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5288 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5289 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5290 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5291 excess_data: Vec::new(),
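// A minimal sketch (hypothetical helper) of the ordering above: BOLT 7
// requires `node_id_1` to be the lexicographically lesser of the two node
// ids, with each `bitcoin_key_*` following its node's position.
//
//     fn order_node_ids(ours: [u8; 33], theirs: [u8; 33]) -> ([u8; 33], [u8; 33]) {
//         if ours[..] < theirs[..] { (ours, theirs) } else { (theirs, ours) }
//     }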
5297 fn get_announcement_sigs<NS: Deref, L: Deref>(
5298 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5299 best_block_height: u32, logger: &L
5300 ) -> Option<msgs::AnnouncementSignatures>
5302 NS::Target: NodeSigner,
5305 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5309 if !self.context.is_usable() {
5313 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5314 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5318 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5322 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5323 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5326 log_trace!(logger, "{:?}", e);
5330 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5332 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5337 match &self.context.holder_signer {
5338 ChannelSignerType::Ecdsa(ecdsa) => {
5339 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5341 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5346 let short_channel_id = match self.context.get_short_channel_id() {
5348 None => return None,
5351 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5353 Some(msgs::AnnouncementSignatures {
5354 channel_id: self.context.channel_id(),
5356 node_signature: our_node_sig,
5357 bitcoin_signature: our_bitcoin_sig,
5360 // TODO (taproot|arik)
5366 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5368 fn sign_channel_announcement<NS: Deref>(
5369 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5370 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5371 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5372 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5373 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5374 let were_node_one = announcement.node_id_1 == our_node_key;
5376 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5377 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5378 match &self.context.holder_signer {
5379 ChannelSignerType::Ecdsa(ecdsa) => {
5380 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5381 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5382 Ok(msgs::ChannelAnnouncement {
5383 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5384 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5385 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5386 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5387 contents: announcement,
5390 // TODO (taproot|arik)
5395 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5399 /// Processes an incoming announcement_signatures message, providing a fully-signed
5400 /// channel_announcement message which we can broadcast and storing our counterparty's
5401 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5402 pub fn announcement_signatures<NS: Deref>(
5403 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5404 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5405 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5406 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5408 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5410 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5411 return Err(ChannelError::Close(format!(
5412 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5413 &announcement, self.context.get_counterparty_node_id())));
5415 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5416 return Err(ChannelError::Close(format!(
5417 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5418 &announcement, self.context.counterparty_funding_pubkey())));
5421 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5422 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5423 return Err(ChannelError::Ignore(
5424 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5427 self.sign_channel_announcement(node_signer, announcement)
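// A minimal worked example of the depth check above: with the funding
// transaction confirmed at height H, `H + 5 > best_block_height` keeps
// holding until `best_block_height >= H + 5`, i.e. until the funding
// transaction has at least six confirmations, matching the error message.
//
//     let conf_height = 100_000u32;
//     let six_confs_at = conf_height + 5; // first best-block height with 6 confs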
5430 /// Gets a signed channel_announcement for this channel, if we previously received an
5431 /// announcement_signatures from our counterparty.
5432 pub fn get_signed_channel_announcement<NS: Deref>(
5433 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5434 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5435 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5438 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5440 Err(_) => return None,
5442 match self.sign_channel_announcement(node_signer, announcement) {
5443 Ok(res) => Some(res),
5448 /// May panic if called on a channel that wasn't immediately-previously
5449 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5450 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5451 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5452 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5453 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5454 // current to_remote balances. However, it no longer has any use, and thus is now simply
5455 // set to a dummy (but valid, as required by the spec) public key.
5456 // Fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5457 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5458 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5459 let mut pk = [2; 33]; pk[1] = 0xff;
5460 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5461 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5462 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5463 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5466 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5469 self.mark_awaiting_response();
5470 msgs::ChannelReestablish {
5471 channel_id: self.context.channel_id(),
5472 // The protocol has two different commitment number concepts - the "commitment
5473 // transaction number", which starts from 0 and counts up, and the "revocation key
5474 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5475 // commitment transaction numbers by the index which will be used to reveal the
5476 // revocation key for that commitment transaction, which means we have to convert them
5477 // to protocol-level commitment numbers here...
5479 // next_local_commitment_number is the next commitment_signed number we expect to
5480 // receive (indicating if they need to resend one that we missed).
5481 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5482 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5483 // receive, however we track it by the next commitment number for a remote transaction
5484 // (which is one further, as they always revoke previous commitment transaction, not
5485 // the one we send) so we have to decrement by 1. Note that if
5486 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5487 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5489 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5490 your_last_per_commitment_secret: remote_last_secret,
5491 my_current_per_commitment_point: dummy_pubkey,
5492 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5493 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5494 // txid of that interactive transaction, else we MUST NOT set it.
5495 next_funding_txid: None,
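// A minimal worked example of the conversion described above, assuming the
// usual INITIAL_COMMITMENT_NUMBER = 2^48 - 1 convention:
//
//     const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
//     let cur_holder = INITIAL_COMMITMENT_NUMBER - 2; // two commitment_signed received
//     let next_local_commitment_number = INITIAL_COMMITMENT_NUMBER - cur_holder; // = 2
//     let cur_counterparty = INITIAL_COMMITMENT_NUMBER - 2;
//     let next_remote_commitment_number = INITIAL_COMMITMENT_NUMBER - cur_counterparty - 1; // = 1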
5500 // Send stuff to our remote peers:
5502 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5503 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5504 /// commitment update.
5506 /// `Err`s will only be [`ChannelError::Ignore`].
5507 pub fn queue_add_htlc<F: Deref, L: Deref>(
5508 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5509 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5510 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5511 ) -> Result<(), ChannelError>
5512 where F::Target: FeeEstimator, L::Target: Logger
5515 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5516 skimmed_fee_msat, fee_estimator, logger)
5517 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5519 if let ChannelError::Ignore(_) = err { /* fine */ }
5520 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5525 /// Adds a pending outbound HTLC to this channel; note that you probably want
5526 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5528 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5530 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5531 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5533 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5534 /// we may not yet have sent the previous commitment update messages and will need to
5535 /// regenerate them.
5537 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5538 /// on this [`Channel`] if `force_holding_cell` is false.
5540 /// `Err`s will only be [`ChannelError::Ignore`].
5541 fn send_htlc<F: Deref, L: Deref>(
5542 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5543 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5544 skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5545 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5546 where F::Target: FeeEstimator, L::Target: Logger
5548 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5549 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5551 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5552 if amount_msat > channel_total_msat {
5553 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5556 if amount_msat == 0 {
5557 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5560 let available_balances = self.context.get_available_balances(fee_estimator);
5561 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5562 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5563 available_balances.next_outbound_htlc_minimum_msat)));
5566 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5567 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5568 available_balances.next_outbound_htlc_limit_msat)));
5571 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5572 // Note that this should never really happen: being !is_live() on receipt of an
5573 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5574 // the user to send directly into a !is_live() channel. However, if we
5575 // disconnected during the time the previous hop was doing the commitment dance we may
5576 // end up getting here after the forwarding delay. In any case, returning an
5577 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5578 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5581 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5582 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5583 payment_hash, amount_msat,
5584 if force_holding_cell { "into holding cell" }
5585 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5586 else { "to peer" });
5588 if need_holding_cell {
5589 force_holding_cell = true;
5592 // Now update local state:
5593 if force_holding_cell {
5594 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5599 onion_routing_packet,
5601 blinding_point: None,
5606 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5607 htlc_id: self.context.next_holder_htlc_id,
5609 payment_hash: payment_hash.clone(),
5611 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5613 blinding_point: None,
5617 let res = msgs::UpdateAddHTLC {
5618 channel_id: self.context.channel_id,
5619 htlc_id: self.context.next_holder_htlc_id,
5623 onion_routing_packet,
5625 blinding_point: None,
5627 self.context.next_holder_htlc_id += 1;
5632 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5633 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5634 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5635 // fail to generate this, we still are at least at a position where upgrading their status
5637 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5638 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5639 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5641 if let Some(state) = new_state {
5642 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5646 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5647 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5648 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5649 // Grab the preimage, if it exists, instead of cloning
5650 let mut reason = OutboundHTLCOutcome::Success(None);
5651 mem::swap(outcome, &mut reason);
5652 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
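// (A note on the swap idiom above: it is equivalent to
//      let reason = mem::replace(outcome, OutboundHTLCOutcome::Success(None));
//  i.e. the real outcome is moved out in place and a cheap placeholder is
//  left behind, so any contained preimage is never cloned.)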
5655 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5656 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5657 debug_assert!(!self.context.is_outbound());
5658 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5659 self.context.feerate_per_kw = feerate;
5660 self.context.pending_update_fee = None;
5663 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5665 let (mut htlcs_ref, counterparty_commitment_tx) =
5666 self.build_commitment_no_state_update(logger);
5667 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5668 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5669 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5671 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5672 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5675 self.context.latest_monitor_update_id += 1;
5676 let monitor_update = ChannelMonitorUpdate {
5677 update_id: self.context.latest_monitor_update_id,
5678 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5679 commitment_txid: counterparty_commitment_txid,
5680 htlc_outputs: htlcs.clone(),
5681 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5682 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5683 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5684 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5685 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5688 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5692 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5693 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5694 where L::Target: Logger
5696 let counterparty_keys = self.context.build_remote_transaction_keys();
5697 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5698 let counterparty_commitment_tx = commitment_stats.tx;
5700 #[cfg(any(test, fuzzing))]
5702 if !self.context.is_outbound() {
5703 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5704 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5705 if let Some(info) = projected_commit_tx_info {
5706 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5707 if info.total_pending_htlcs == total_pending_htlcs
5708 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5709 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5710 && info.feerate == self.context.feerate_per_kw {
5711 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5712 assert_eq!(actual_fee, info.fee);
5718 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5721 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5722 /// generation when we shouldn't change HTLC/channel state.
5723 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5724 // Get the fee tests from `build_commitment_no_state_update`
5725 #[cfg(any(test, fuzzing))]
5726 self.build_commitment_no_state_update(logger);
5728 let counterparty_keys = self.context.build_remote_transaction_keys();
5729 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5730 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5732 match &self.context.holder_signer {
5733 ChannelSignerType::Ecdsa(ecdsa) => {
5734 let (signature, htlc_signatures);
5737 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5738 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5742 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5743 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5745 htlc_signatures = res.1;
5747 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5748 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5749 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5750 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5752 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5753 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5754 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5755 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5756 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5757 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5761 Ok((msgs::CommitmentSigned {
5762 channel_id: self.context.channel_id,
5766 partial_signature_with_nonce: None,
5767 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5769 // TODO (taproot|arik)
5775 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5776 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5778 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5779 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5780 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5781 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5782 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5783 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5784 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5785 where F::Target: FeeEstimator, L::Target: Logger
5787 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5788 onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
5789 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5792 let monitor_update = self.build_commitment_no_status_check(logger);
5793 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5794 Ok(self.push_ret_blockable_mon_update(monitor_update))
5800 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5802 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5803 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5804 fee_base_msat: msg.contents.fee_base_msat,
5805 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5806 cltv_expiry_delta: msg.contents.cltv_expiry_delta
5808 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5810 self.context.counterparty_forwarding_info = new_forwarding_info;
5816 /// Begins the shutdown process, getting a message for the remote peer and returning all
5817 /// holding cell HTLCs for payment failure.
5819 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
5820 /// [`ChannelMonitorUpdate`] will be returned.
5821 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5822 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5823 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
5825 for htlc in self.context.pending_outbound_htlcs.iter() {
5826 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5827 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5830 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5831 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5832 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5834 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5835 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5838 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5839 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5841 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5842 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5843 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5846 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5847 // script is set; we just force-close and call it a day.
5848 let mut chan_closed = false;
5849 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5853 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5855 None if !chan_closed => {
5856 // use override shutdown script if provided
5857 let shutdown_scriptpubkey = match override_shutdown_script {
5858 Some(script) => script,
5860 // otherwise, use the shutdown scriptpubkey provided by the signer
5861 match signer_provider.get_shutdown_scriptpubkey() {
5862 Ok(scriptpubkey) => scriptpubkey,
5863 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5867 if !shutdown_scriptpubkey.is_compatible(their_features) {
5868 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5870 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5876 // From here on out, we may not fail!
5877 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
5878 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5879 let shutdown_result = ShutdownResult {
5880 monitor_update: None,
5881 dropped_outbound_htlcs: Vec::new(),
5882 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5884 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5885 Some(shutdown_result)
5887 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5890 self.context.update_time_counter += 1;
5892 let monitor_update = if update_shutdown_script {
5893 self.context.latest_monitor_update_id += 1;
5894 let monitor_update = ChannelMonitorUpdate {
5895 update_id: self.context.latest_monitor_update_id,
5896 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5897 scriptpubkey: self.get_closing_scriptpubkey(),
5900 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5901 self.push_ret_blockable_mon_update(monitor_update)
5903 let shutdown = msgs::Shutdown {
5904 channel_id: self.context.channel_id,
5905 scriptpubkey: self.get_closing_scriptpubkey(),
5908 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5909 // our shutdown until we've committed all of the pending changes.
5910 self.context.holding_cell_update_fee = None;
5911 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5912 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5914 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5915 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5922 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5923 "we can't both complete shutdown and return a monitor update");
5925 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
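/// Returns the `HTLCSource` and `PaymentHash` of every outbound HTLC in flight on this
/// channel, including adds still waiting in the holding cell.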
5928 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5929 self.context.holding_cell_htlc_updates.iter()
5930 .flat_map(|htlc_update| {
5932 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5933 => Some((source, payment_hash)),
5937 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5941 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
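///
/// # Example
///
/// Illustrative sketch (not compiled); assumes a `fee_estimator`, `entropy_source`,
/// `signer_provider`, the peer's `node_id` and `their_features`, and a `config`:
/// ```ignore
/// let chan = OutboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, node_id, &their_features,
///     1_000_000, // channel_value_satoshis
///     0,         // push_msat
///     42,        // user_id
///     &config, current_chain_height, outbound_scid_alias, None,
/// )?;
/// let open_channel_msg = chan.get_open_channel(chain_hash);
/// ```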
5942 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
5943 pub context: ChannelContext<SP>,
5944 pub unfunded_context: UnfundedChannelContext,
5947 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5948 pub fn new<ES: Deref, F: Deref>(
5949 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5950 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5951 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
5952 ) -> Result<OutboundV1Channel<SP>, APIError>
5953 where ES::Target: EntropySource,
5954 F::Target: FeeEstimator
5956 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5957 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5958 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5959 let pubkeys = holder_signer.pubkeys().clone();
5961 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5962 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5964 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5965 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5967 let channel_value_msat = channel_value_satoshis * 1000;
5968 if push_msat > channel_value_msat {
5969 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5971 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5972 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
5974 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5975 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5976 // Protocol-level safety check in place; this should never happen because
5977 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
5978 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5981 let channel_type = Self::get_initial_channel_type(&config, their_features);
5982 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5984 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5985 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5987 (ConfirmationTarget::NonAnchorChannelFee, 0)
5989 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5991 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5992 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5993 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5994 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5997 let mut secp_ctx = Secp256k1::new();
5998 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6000 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6001 match signer_provider.get_shutdown_scriptpubkey() {
6002 Ok(scriptpubkey) => Some(scriptpubkey),
6003 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6007 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6008 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6009 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6013 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6014 Ok(script) => script,
6015 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6018 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6021 context: ChannelContext {
6024 config: LegacyChannelConfig {
6025 options: config.channel_config.clone(),
6026 announced_channel: config.channel_handshake_config.announced_channel,
6027 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6032 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6034 channel_id: temporary_channel_id,
6035 temporary_channel_id: Some(temporary_channel_id),
6036 channel_state: ChannelState::OurInitSent as u32,
6037 announcement_sigs_state: AnnouncementSigsState::NotSent,
6039 channel_value_satoshis,
6041 latest_monitor_update_id: 0,
6043 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6044 shutdown_scriptpubkey,
6047 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6048 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6051 pending_inbound_htlcs: Vec::new(),
6052 pending_outbound_htlcs: Vec::new(),
6053 holding_cell_htlc_updates: Vec::new(),
6054 pending_update_fee: None,
6055 holding_cell_update_fee: None,
6056 next_holder_htlc_id: 0,
6057 next_counterparty_htlc_id: 0,
6058 update_time_counter: 1,
6060 resend_order: RAACommitmentOrder::CommitmentFirst,
6062 monitor_pending_channel_ready: false,
6063 monitor_pending_revoke_and_ack: false,
6064 monitor_pending_commitment_signed: false,
6065 monitor_pending_forwards: Vec::new(),
6066 monitor_pending_failures: Vec::new(),
6067 monitor_pending_finalized_fulfills: Vec::new(),
6069 signer_pending_commitment_update: false,
6070 signer_pending_funding: false,
6072 #[cfg(debug_assertions)]
6073 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6074 #[cfg(debug_assertions)]
6075 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6077 last_sent_closing_fee: None,
6078 pending_counterparty_closing_signed: None,
6079 expecting_peer_commitment_signed: false,
6080 closing_fee_limits: None,
6081 target_closing_feerate_sats_per_kw: None,
6083 funding_tx_confirmed_in: None,
6084 funding_tx_confirmation_height: 0,
6085 short_channel_id: None,
6086 channel_creation_height: current_chain_height,
6088 feerate_per_kw: commitment_feerate,
6089 counterparty_dust_limit_satoshis: 0,
6090 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6091 counterparty_max_htlc_value_in_flight_msat: 0,
6092 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6093 counterparty_selected_channel_reserve_satoshis: None, // Filled in during accept_channel
6094 holder_selected_channel_reserve_satoshis,
6095 counterparty_htlc_minimum_msat: 0,
6096 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6097 counterparty_max_accepted_htlcs: 0,
6098 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6099 minimum_depth: None, // Filled in during accept_channel
6101 counterparty_forwarding_info: None,
6103 channel_transaction_parameters: ChannelTransactionParameters {
6104 holder_pubkeys: pubkeys,
6105 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6106 is_outbound_from_holder: true,
6107 counterparty_parameters: None,
6108 funding_outpoint: None,
6109 channel_type_features: channel_type.clone()
6111 funding_transaction: None,
6112 is_batch_funding: None,
6114 counterparty_cur_commitment_point: None,
6115 counterparty_prev_commitment_point: None,
6116 counterparty_node_id,
6118 counterparty_shutdown_scriptpubkey: None,
6120 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6122 channel_update_status: ChannelUpdateStatus::Enabled,
6123 closing_signed_in_flight: false,
6125 announcement_sigs: None,
6127 #[cfg(any(test, fuzzing))]
6128 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6129 #[cfg(any(test, fuzzing))]
6130 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6132 workaround_lnd_bug_4006: None,
6133 sent_message_awaiting_response: None,
6135 latest_inbound_scid_alias: None,
6136 outbound_scid_alias,
6138 channel_pending_event_emitted: false,
6139 channel_ready_event_emitted: false,
6141 #[cfg(any(test, fuzzing))]
6142 historical_inbound_htlc_fulfills: HashSet::new(),
6147 blocked_monitor_updates: Vec::new(),
6149 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6153 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6154 /// a funding_created message for the remote peer.
6155 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6156 /// or if called on an inbound channel.
6157 /// Note that channel_id changes during this call!
6158 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6159 /// If an Err is returned, it is a ChannelError::Close.
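///
/// # Example
///
/// Illustrative sketch (not compiled); assumes the open_channel/accept_channel exchange has
/// completed and a `funding_tx` paying to the channel exists:
/// ```ignore
/// let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
/// let (chan, funding_created_opt) = outbound_chan
///     .get_funding_created(funding_tx, funding_txo, false, &logger)
///     .map_err(|(_chan, e)| e)?;
/// // Do NOT broadcast `funding_tx` until the peer's funding_signed has been validated.
/// ```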
6160 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6161 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6162 if !self.context.is_outbound() {
6163 panic!("Tried to create outbound funding_created message on an inbound channel!");
6165 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6166 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6168 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6169 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6170 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6171 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6174 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6175 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6177 // Now that we're past error-generating stuff, update our local state:
6179 self.context.channel_state = ChannelState::FundingCreated as u32;
6180 self.context.channel_id = funding_txo.to_channel_id();
6182 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6183 // We can skip this if it is a zero-conf channel.
6184 if funding_transaction.is_coin_base() &&
6185 self.context.minimum_depth.unwrap_or(0) > 0 &&
6186 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6187 self.context.minimum_depth = Some(COINBASE_MATURITY);
6190 self.context.funding_transaction = Some(funding_transaction);
6191 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding); // Some(()) iff this channel is part of a batch funding transaction
6193 let funding_created = self.context.get_funding_created_msg(logger);
6194 if funding_created.is_none() {
6195 if !self.context.signer_pending_funding {
6196 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6197 self.context.signer_pending_funding = true;
6201 let channel = Channel {
6202 context: self.context,
6205 Ok((channel, funding_created))
6208 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6209 // The default channel type (ie the first one we try) depends on whether the channel is
6210 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6211 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6212 // with no other changes, and fall back to `only_static_remotekey`.
6213 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6214 if !config.channel_handshake_config.announced_channel &&
6215 config.channel_handshake_config.negotiate_scid_privacy &&
6216 their_features.supports_scid_privacy() {
6217 ret.set_scid_privacy_required();
6220 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6221 // set it now. If they don't understand it, we'll fall back to our default of
6222 // `only_static_remotekey`.
6223 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6224 their_features.supports_anchors_zero_fee_htlc_tx() {
6225 ret.set_anchors_zero_fee_htlc_tx_required();
6231 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6232 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6233 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
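///
/// # Example
///
/// Illustrative sketch (not compiled): after receiving an error for an unfunded outbound
/// channel, retry with a downgraded channel type before giving up:
/// ```ignore
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(open_channel_msg) => { /* re-send open_channel with fewer features */ },
///     Err(()) => { /* no fallback left; fail the channel */ },
/// }
/// ```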
6234 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6235 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6236 ) -> Result<msgs::OpenChannel, ()>
6238 F::Target: FeeEstimator
6240 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6241 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6242 // We've exhausted our options
6245 // We support opening a few different types of channels. Try removing our additional
6246 // features one by one until we've either arrived at our default or the counterparty has accepted something.
6249 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6250 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6251 // checks whether the counterparty supports every feature, this would only happen if the
6252 // counterparty is advertising the feature, but rejecting channels proposing the feature for any reason.
6254 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6255 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6256 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6257 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6258 } else if self.context.channel_type.supports_scid_privacy() {
6259 self.context.channel_type.clear_scid_privacy();
6261 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6263 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6264 Ok(self.get_open_channel(chain_hash))
6267 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6268 if !self.context.is_outbound() {
6269 panic!("Tried to open a channel for an inbound channel?");
6271 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6272 panic!("Cannot generate an open_channel after we've moved forward");
6275 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6276 panic!("Tried to send an open_channel for a channel that has already advanced");
6279 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6280 let keys = self.context.get_holder_pubkeys();
6284 temporary_channel_id: self.context.channel_id,
6285 funding_satoshis: self.context.channel_value_satoshis,
6286 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6287 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6288 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6289 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6290 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6291 feerate_per_kw: self.context.feerate_per_kw as u32,
6292 to_self_delay: self.context.get_holder_selected_contest_delay(),
6293 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6294 funding_pubkey: keys.funding_pubkey,
6295 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6296 payment_point: keys.payment_point,
6297 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6298 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6299 first_per_commitment_point,
6300 channel_flags: if self.context.config.announced_channel {1} else {0},
6301 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6302 Some(script) => script.clone().into_inner(),
6303 None => Builder::new().into_script(),
6305 channel_type: Some(self.context.channel_type.clone()),
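/// Handles the counterparty's `accept_channel` message, checking its parameters against our
/// configured handshake limits and, on success, storing the negotiated counterparty values.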
6310 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6311 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6313 // Check sanity of message fields:
6314 if !self.context.is_outbound() {
6315 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6317 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6318 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6320 if msg.dust_limit_satoshis > 21000000 * 100000000 { // 21M BTC * 100M sat/BTC: the total bitcoin supply in satoshis
6321 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6323 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6324 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6326 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6327 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6329 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6330 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6331 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6333 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6334 if msg.htlc_minimum_msat >= full_channel_value_msat {
6335 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6337 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6338 if msg.to_self_delay > max_delay_acceptable {
6339 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6341 if msg.max_accepted_htlcs < 1 {
6342 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6344 if msg.max_accepted_htlcs > MAX_HTLCS {
6345 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6348 // Now check against optional parameters as set by config...
6349 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6350 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6352 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6353 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6355 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6356 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6358 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6359 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6361 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6362 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6364 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6365 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6367 if msg.minimum_depth > peer_limits.max_minimum_depth {
6368 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6371 if let Some(ty) = &msg.channel_type {
6372 if *ty != self.context.channel_type {
6373 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6375 } else if their_features.supports_channel_type() {
6376 // Assume they've accepted the channel type as they said they understand it.
6378 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6379 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6380 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6382 self.context.channel_type = channel_type.clone();
6383 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6386 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6387 match &msg.shutdown_scriptpubkey {
6388 &Some(ref script) => {
6389 // Peer is signaling upfront_shutdown and has opted out with a 0-length script; we don't enforce anything.
6390 if script.len() == 0 {
6393 if !script::is_bolt2_compliant(&script, their_features) {
6394 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6396 Some(script.clone())
6399 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
6401 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we did not receive a script. Use a 0-length script to opt out".to_owned()));
6406 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6407 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6408 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6409 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6410 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6412 if peer_limits.trust_own_funding_0conf {
6413 self.context.minimum_depth = Some(msg.minimum_depth);
6415 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6418 let counterparty_pubkeys = ChannelPublicKeys {
6419 funding_pubkey: msg.funding_pubkey,
6420 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6421 payment_point: msg.payment_point,
6422 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6423 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6426 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6427 selected_contest_delay: msg.to_self_delay,
6428 pubkeys: counterparty_pubkeys,
6431 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6432 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6434 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6435 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6441 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6442 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6443 pub context: ChannelContext<SP>,
6444 pub unfunded_context: UnfundedChannelContext,
6447 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6448 /// Creates a new channel from a remote side's request for one.
6449 /// Assumes chain_hash has already been checked and corresponds with what we expect!
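///
/// # Example
///
/// Illustrative sketch (not compiled); assumes a received `open_channel_msg` plus the usual
/// supporting objects (`fee_estimator`, `entropy_source`, `signer_provider`, `config`, `logger`):
/// ```ignore
/// let mut chan = InboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &our_supported_features, &their_features, &open_channel_msg,
///     42, &config, current_chain_height, &logger, /* is_0conf */ false,
/// )?;
/// let accept_channel_msg = chan.accept_inbound_channel();
/// ```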
6450 pub fn new<ES: Deref, F: Deref, L: Deref>(
6451 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6452 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6453 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6454 current_chain_height: u32, logger: &L, is_0conf: bool,
6455 ) -> Result<InboundV1Channel<SP>, ChannelError>
6456 where ES::Target: EntropySource,
6457 F::Target: FeeEstimator,
6460 let announced_channel = (msg.channel_flags & 1) == 1;
6462 // First check the channel type is known, failing before we do anything else if we don't
6463 // support this channel type.
6464 let channel_type = if let Some(channel_type) = &msg.channel_type {
6465 if channel_type.supports_any_optional_bits() {
6466 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6469 // We only support the channel types defined by the `ChannelManager` in
6470 // `provided_channel_type_features`. The channel type must always support
6471 // `static_remote_key`.
6472 if !channel_type.requires_static_remote_key() {
6473 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6475 // Make sure we support all of the features behind the channel type.
6476 if !channel_type.is_subset(our_supported_features) {
6477 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6479 if channel_type.requires_scid_privacy() && announced_channel {
6480 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6482 channel_type.clone()
6484 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6485 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6486 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6491 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6492 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6493 let pubkeys = holder_signer.pubkeys().clone();
6494 let counterparty_pubkeys = ChannelPublicKeys {
6495 funding_pubkey: msg.funding_pubkey,
6496 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6497 payment_point: msg.payment_point,
6498 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6499 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6502 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6503 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be at least {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6506 // Check sanity of message fields:
6507 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6508 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6510 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6511 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6513 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6514 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6516 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6517 if msg.push_msat > full_channel_value_msat {
6518 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6520 if msg.dust_limit_satoshis > msg.funding_satoshis {
6521 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6523 if msg.htlc_minimum_msat >= full_channel_value_msat {
6524 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6526 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6528 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6529 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6530 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6532 if msg.max_accepted_htlcs < 1 {
6533 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6535 if msg.max_accepted_htlcs > MAX_HTLCS {
6536 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6539 // Now check against optional parameters as set by config...
6540 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6541 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6543 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6544 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6546 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6547 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6549 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6550 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6552 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6553 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6555 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6556 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6558 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6559 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6562 // Convert things into internal flags and prep our state:
6564 if config.channel_handshake_limits.force_announced_channel_preference {
6565 if config.channel_handshake_config.announced_channel != announced_channel {
6566 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6570 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6571 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6572 // Protocol-level safety check in place; this should never happen because
6573 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6574 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6576 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6577 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6579 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6580 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6581 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6583 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6584 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6587 // check if the funder's amount for the initial commitment tx is sufficient
6588 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6589 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6590 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6594 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6595 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6596 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6597 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6600 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6601 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6602 // want to push much to us), our counterparty should always have more than our reserve.
6603 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6604 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6607 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6608 match &msg.shutdown_scriptpubkey {
6609 &Some(ref script) => {
6610 // Peer is signaling upfront_shutdown and has opted out with a 0-length script; we don't enforce anything.
6611 if script.len() == 0 {
6614 if !script::is_bolt2_compliant(&script, their_features) {
6615 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6617 Some(script.clone())
6620 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
6622 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we did not receive a script. Use a 0-length script to opt out".to_owned()));
6627 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6628 match signer_provider.get_shutdown_scriptpubkey() {
6629 Ok(scriptpubkey) => Some(scriptpubkey),
6630 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6634 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6635 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6636 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6640 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6641 Ok(script) => script,
6642 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6645 let mut secp_ctx = Secp256k1::new();
6646 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6648 let minimum_depth = if is_0conf {
6651 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6655 context: ChannelContext {
6658 config: LegacyChannelConfig {
6659 options: config.channel_config.clone(),
6661 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6666 inbound_handshake_limits_override: None,
6668 temporary_channel_id: Some(msg.temporary_channel_id),
6669 channel_id: msg.temporary_channel_id,
6670 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6671 announcement_sigs_state: AnnouncementSigsState::NotSent,
6674 latest_monitor_update_id: 0,
6676 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6677 shutdown_scriptpubkey,
6680 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6681 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6682 value_to_self_msat: msg.push_msat,
6684 pending_inbound_htlcs: Vec::new(),
6685 pending_outbound_htlcs: Vec::new(),
6686 holding_cell_htlc_updates: Vec::new(),
6687 pending_update_fee: None,
6688 holding_cell_update_fee: None,
6689 next_holder_htlc_id: 0,
6690 next_counterparty_htlc_id: 0,
6691 update_time_counter: 1,
6693 resend_order: RAACommitmentOrder::CommitmentFirst,
6695 monitor_pending_channel_ready: false,
6696 monitor_pending_revoke_and_ack: false,
6697 monitor_pending_commitment_signed: false,
6698 monitor_pending_forwards: Vec::new(),
6699 monitor_pending_failures: Vec::new(),
6700 monitor_pending_finalized_fulfills: Vec::new(),
6702 signer_pending_commitment_update: false,
6703 signer_pending_funding: false,
6705 #[cfg(debug_assertions)]
6706 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6707 #[cfg(debug_assertions)]
6708 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6710 last_sent_closing_fee: None,
6711 pending_counterparty_closing_signed: None,
6712 expecting_peer_commitment_signed: false,
6713 closing_fee_limits: None,
6714 target_closing_feerate_sats_per_kw: None,
6716 funding_tx_confirmed_in: None,
6717 funding_tx_confirmation_height: 0,
6718 short_channel_id: None,
6719 channel_creation_height: current_chain_height,
6721 feerate_per_kw: msg.feerate_per_kw,
6722 channel_value_satoshis: msg.funding_satoshis,
6723 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6724 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6725 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6726 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6727 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6728 holder_selected_channel_reserve_satoshis,
6729 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6730 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6731 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6732 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6735 counterparty_forwarding_info: None,
6737 channel_transaction_parameters: ChannelTransactionParameters {
6738 holder_pubkeys: pubkeys,
6739 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6740 is_outbound_from_holder: false,
6741 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6742 selected_contest_delay: msg.to_self_delay,
6743 pubkeys: counterparty_pubkeys,
6745 funding_outpoint: None,
6746 channel_type_features: channel_type.clone()
6748 funding_transaction: None,
6749 is_batch_funding: None,
6751 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6752 counterparty_prev_commitment_point: None,
6753 counterparty_node_id,
6755 counterparty_shutdown_scriptpubkey,
6757 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6759 channel_update_status: ChannelUpdateStatus::Enabled,
6760 closing_signed_in_flight: false,
6762 announcement_sigs: None,
6764 #[cfg(any(test, fuzzing))]
6765 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6766 #[cfg(any(test, fuzzing))]
6767 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6769 workaround_lnd_bug_4006: None,
6770 sent_message_awaiting_response: None,
6772 latest_inbound_scid_alias: None,
6773 outbound_scid_alias: 0,
6775 channel_pending_event_emitted: false,
6776 channel_ready_event_emitted: false,
6778 #[cfg(any(test, fuzzing))]
6779 historical_inbound_htlc_fulfills: HashSet::new(),
6784 blocked_monitor_updates: Vec::new(),
6786 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6792 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6793 /// should be sent back to the counterparty node.
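///
/// # Example
///
/// Illustrative sketch (not compiled); the typical responder flow once the channel is accepted:
/// ```ignore
/// let accept_channel_msg = inbound_chan.accept_inbound_channel();
/// // ... later, upon receiving funding_created from the peer:
/// let (chan, funding_signed_opt, monitor) = inbound_chan
///     .funding_created(&funding_created_msg, best_block, &signer_provider, &logger)
///     .map_err(|(_chan, e)| e)?;
/// ```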
6795 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6796 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6797 if self.context.is_outbound() {
6798 panic!("Tried to send accept_channel for an outbound channel?");
6800 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6801 panic!("Tried to send accept_channel after channel had moved forward");
6803 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6804 panic!("Tried to send an accept_channel for a channel that has already advanced");
6807 self.generate_accept_channel_message()
6810 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6811 /// inbound channel. If the intention is to accept an inbound channel, use
6812 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6814 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6815 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6816 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6817 let keys = self.context.get_holder_pubkeys();
6819 msgs::AcceptChannel {
6820 temporary_channel_id: self.context.channel_id,
6821 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6822 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6823 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6824 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6825 minimum_depth: self.context.minimum_depth.unwrap(),
6826 to_self_delay: self.context.get_holder_selected_contest_delay(),
6827 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6828 funding_pubkey: keys.funding_pubkey,
6829 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6830 payment_point: keys.payment_point,
6831 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6832 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6833 first_per_commitment_point,
6834 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6835 Some(script) => script.clone().into_inner(),
6836 None => Builder::new().into_script(),
6838 channel_type: Some(self.context.channel_type.clone()),
6840 next_local_nonce: None,
6844 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6845 /// inbound channel without accepting it.
6847 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6849 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6850 self.generate_accept_channel_message()
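/// Verifies the counterparty's `funding_created` signature against our initial holder
/// commitment transaction, returning that transaction on success.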
6853 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6854 let funding_script = self.context.get_funding_redeemscript();
6856 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6857 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6858 let trusted_tx = initial_commitment_tx.trust();
6859 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6860 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6861 // They sign the holder commitment transaction...
6862 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6863 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6864 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6865 encode::serialize_hex(&funding_script), &self.context.channel_id());
6866 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6868 Ok(initial_commitment_tx)
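/// Handles the counterparty's `funding_created` message: validates their signature on our
/// initial commitment transaction, promotes this into a full `Channel`, and returns a
/// `funding_signed` message (if the signer is ready) along with the initial `ChannelMonitor`.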
6871 pub fn funding_created<L: Deref>(
6872 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6873 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
6877 if self.context.is_outbound() {
6878 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6880 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6881 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6882 // remember the channel, so it's safe to just send an error_message here and drop the channel.
6884 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6886 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6887 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6888 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6889 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6892 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6893 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6894 // This is an externally observable change before we finish all our checks. In particular,
6895 // check_funding_created_signature may fail.
6896 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6898 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
6900 Err(ChannelError::Close(e)) => {
6901 self.context.channel_transaction_parameters.funding_outpoint = None;
6902 return Err((self, ChannelError::Close(e)));
6905 // The only error we know how to handle is ChannelError::Close, so we fall over here
6906 // to make sure we don't continue with an inconsistent state.
6907 panic!("unexpected error type from check_funding_created_signature {:?}", e);
6911 let holder_commitment_tx = HolderCommitmentTransaction::new(
6912 initial_commitment_tx,
6915 &self.context.get_holder_pubkeys().funding_pubkey,
6916 self.context.counterparty_funding_pubkey()
6919 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6920 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6923 // Now that we're past error-generating stuff, update our local state:
6925 self.context.channel_state = ChannelState::FundingSent as u32;
6926 self.context.channel_id = funding_txo.to_channel_id();
6927 self.context.cur_counterparty_commitment_transaction_number -= 1;
6928 self.context.cur_holder_commitment_transaction_number -= 1;
6930 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6932 let funding_redeemscript = self.context.get_funding_redeemscript();
6933 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6934 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6935 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6936 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6937 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6938 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6939 shutdown_script, self.context.get_holder_selected_contest_delay(),
6940 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6941 &self.context.channel_transaction_parameters,
6942 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6944 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6946 channel_monitor.provide_initial_counterparty_commitment_tx(
6947 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6948 self.context.cur_counterparty_commitment_transaction_number + 1,
6949 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6950 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6951 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6953 log_info!(logger, "{} funding_signed for peer for channel {}",
6954 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
6956 // Promote the channel to a full-fledged one now that we have updated the state and have a
6957 // `ChannelMonitor`.
6958 let mut channel = Channel {
6959 context: self.context,
6961 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6962 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6964 Ok((channel, funding_signed, channel_monitor))
6968 const SERIALIZATION_VERSION: u8 = 3;
6969 const MIN_SERIALIZATION_VERSION: u8 = 3;
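// Illustration-only sketch (never compiled) of the compatibility gate implied by
// `write_ver_prefix!`/`read_ver_prefix!`: a reader accepts any serialization whose
// declared minimum-required reader version it understands, even if the version
// that wrote it is newer than the reader itself.
#[cfg(any())]
fn check_ver_prefix(written_ver: u8, min_ver_required: u8) -> Result<u8, DecodeError> {
	if min_ver_required > SERIALIZATION_VERSION {
		// Written by a future version which declares we cannot safely read it.
		return Err(DecodeError::UnknownVersion);
	}
	Ok(written_ver)
}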
6971 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6977 impl Writeable for ChannelUpdateStatus {
6978 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6979 // We only care about writing out the current state as it was announced, ie only either
6980 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6981 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6983 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6984 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6985 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6986 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6992 impl Readable for ChannelUpdateStatus {
6993 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6994 Ok(match <u8 as Readable>::read(reader)? {
6995 0 => ChannelUpdateStatus::Enabled,
6996 1 => ChannelUpdateStatus::Disabled,
6997 _ => return Err(DecodeError::InvalidValue),
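// Note that the round trip is intentionally lossy: e.g. DisabledStaged(_) is
// written as 0 above and read back as plain Enabled, since only the
// as-announced state matters after a restart.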
7002 impl Writeable for AnnouncementSigsState {
7003 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7004 // We only care about writing out the current state as if we had just disconnected, at
7005 // which point we always set anything but PeerReceived to NotSent.
7007 AnnouncementSigsState::NotSent => 0u8.write(writer),
7008 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7009 AnnouncementSigsState::Committed => 0u8.write(writer),
7010 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7015 impl Readable for AnnouncementSigsState {
7016 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7017 Ok(match <u8 as Readable>::read(reader)? {
7018 0 => AnnouncementSigsState::NotSent,
7019 1 => AnnouncementSigsState::PeerReceived,
7020 _ => return Err(DecodeError::InvalidValue),
7025 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7026 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7027 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7030 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7032 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7033 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7034 // the low bytes now and the optional high bytes later.
7035 let user_id_low = self.context.user_id as u64;
7036 user_id_low.write(writer)?;
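// Illustration-only sanity check: the low half written here plus the high half
// written as TLV type 25 below recombine losslessly into the original u128.
debug_assert_eq!(((self.context.user_id >> 64) << 64) + user_id_low as u128, self.context.user_id);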
7038 // Version 1 deserializers expected to read parts of the config object here. Version 2
7039 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7040 // `minimum_depth` we simply write dummy values here.
7041 writer.write_all(&[0; 8])?;
7043 self.context.channel_id.write(writer)?;
7044 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7045 self.context.channel_value_satoshis.write(writer)?;
7047 self.context.latest_monitor_update_id.write(writer)?;
7049 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7050 // deserialized from that format.
7051 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7052 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7053 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7055 self.context.destination_script.write(writer)?;
7057 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7058 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7059 self.context.value_to_self_msat.write(writer)?;
7061 let mut dropped_inbound_htlcs = 0;
7062 for htlc in self.context.pending_inbound_htlcs.iter() {
7063 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7064 dropped_inbound_htlcs += 1;
7067 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7068 for htlc in self.context.pending_inbound_htlcs.iter() {
7069 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7072 htlc.htlc_id.write(writer)?;
7073 htlc.amount_msat.write(writer)?;
7074 htlc.cltv_expiry.write(writer)?;
7075 htlc.payment_hash.write(writer)?;
7077 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7078 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7080 htlc_state.write(writer)?;
7082 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7084 htlc_state.write(writer)?;
7086 &InboundHTLCState::Committed => {
7089 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7091 removal_reason.write(writer)?;
7096 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7097 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7098 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7100 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7101 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7102 htlc.htlc_id.write(writer)?;
7103 htlc.amount_msat.write(writer)?;
7104 htlc.cltv_expiry.write(writer)?;
7105 htlc.payment_hash.write(writer)?;
7106 htlc.source.write(writer)?;
7108 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7110 onion_packet.write(writer)?;
7112 &OutboundHTLCState::Committed => {
7115 &OutboundHTLCState::RemoteRemoved(_) => {
7116 // Treat this as a Committed because we haven't received the CS - they'll
7117 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7120 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7122 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7123 preimages.push(preimage);
7125 let reason: Option<&HTLCFailReason> = outcome.into();
7126 reason.write(writer)?;
7128 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7130 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7131 preimages.push(preimage);
7133 let reason: Option<&HTLCFailReason> = outcome.into();
7134 reason.write(writer)?;
7137 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7138 if pending_outbound_skimmed_fees.is_empty() {
7139 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7141 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7142 } else if !pending_outbound_skimmed_fees.is_empty() {
7143 pending_outbound_skimmed_fees.push(None);
7145 pending_outbound_blinding_points.push(htlc.blinding_point);
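// For example, skimmed fees of (none, 5, none) across three HTLCs serialize as
// vec![None, Some(5), None], while the common all-None case leaves the vec empty
// so the optional TLV (type 35 below) can be omitted entirely.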
7148 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7149 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7150 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7151 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7153 &HTLCUpdateAwaitingACK::AddHTLC {
7154 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7155 blinding_point, skimmed_fee_msat,
7158 amount_msat.write(writer)?;
7159 cltv_expiry.write(writer)?;
7160 payment_hash.write(writer)?;
7161 source.write(writer)?;
7162 onion_routing_packet.write(writer)?;
7164 if let Some(skimmed_fee) = skimmed_fee_msat {
7165 if holding_cell_skimmed_fees.is_empty() {
7166 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7168 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7169 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7171 holding_cell_blinding_points.push(blinding_point);
7173 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7175 payment_preimage.write(writer)?;
7176 htlc_id.write(writer)?;
7178 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7180 htlc_id.write(writer)?;
7181 err_packet.write(writer)?;
7186 match self.context.resend_order {
7187 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7188 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7191 self.context.monitor_pending_channel_ready.write(writer)?;
7192 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7193 self.context.monitor_pending_commitment_signed.write(writer)?;
7195 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7196 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7197 pending_forward.write(writer)?;
7198 htlc_id.write(writer)?;
7201 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7202 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7203 htlc_source.write(writer)?;
7204 payment_hash.write(writer)?;
7205 fail_reason.write(writer)?;
7208 if self.context.is_outbound() {
7209 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7210 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7211 Some(feerate).write(writer)?;
7213 // As for inbound HTLCs, if the update was only announced and never committed in a
7214 // commitment_signed, drop it.
7215 None::<u32>.write(writer)?;
7217 self.context.holding_cell_update_fee.write(writer)?;
7219 self.context.next_holder_htlc_id.write(writer)?;
7220 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7221 self.context.update_time_counter.write(writer)?;
7222 self.context.feerate_per_kw.write(writer)?;
7224 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7225 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7226 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7227 // consider the stale state on reload.
7230 self.context.funding_tx_confirmed_in.write(writer)?;
7231 self.context.funding_tx_confirmation_height.write(writer)?;
7232 self.context.short_channel_id.write(writer)?;
7234 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7235 self.context.holder_dust_limit_satoshis.write(writer)?;
7236 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7238 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7239 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7241 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7242 self.context.holder_htlc_minimum_msat.write(writer)?;
7243 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7245 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7246 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7248 match &self.context.counterparty_forwarding_info {
7251 info.fee_base_msat.write(writer)?;
7252 info.fee_proportional_millionths.write(writer)?;
7253 info.cltv_expiry_delta.write(writer)?;
7255 None => 0u8.write(writer)?
7258 self.context.channel_transaction_parameters.write(writer)?;
7259 self.context.funding_transaction.write(writer)?;
7261 self.context.counterparty_cur_commitment_point.write(writer)?;
7262 self.context.counterparty_prev_commitment_point.write(writer)?;
7263 self.context.counterparty_node_id.write(writer)?;
7265 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7267 self.context.commitment_secrets.write(writer)?;
7269 self.context.channel_update_status.write(writer)?;
7271 #[cfg(any(test, fuzzing))]
7272 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7273 #[cfg(any(test, fuzzing))]
7274 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7275 htlc.write(writer)?;
7278 // If the channel type is something other than only-static-remote-key, then we need to have
7279 // older clients fail to deserialize this channel at all. If the type is
7280 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7282 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7283 Some(&self.context.channel_type) } else { None };
7285 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7286 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
7287 // percentage of the channel value other than the 10% which older versions of LDK
7288 // hardcoded before the percentage was made configurable.
7289 let serialized_holder_selected_reserve =
7290 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7291 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7293 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7294 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7295 let serialized_holder_htlc_max_in_flight =
7296 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7297 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7299 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7300 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7302 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7303 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7304 // we write the high bytes as an option here.
7305 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7307 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
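// The TLV below (type 28, even) is only written when the value differs from
// DEFAULT_MAX_HTLCS: omitting it for the default keeps older readers working,
// while the even type makes readers that do not understand a non-default limit
// fail rather than silently ignore it.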
7309 write_tlv_fields!(writer, {
7310 (0, self.context.announcement_sigs, option),
7311 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7312 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7313 // them twice, once with their original default values above, and once as an option
7314 // here. On the read side, old versions will simply ignore the odd-type entries here,
7315 // and new versions map the default values to None and allow the TLV entries here to override them.
7317 (1, self.context.minimum_depth, option),
7318 (2, chan_type, option),
7319 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7320 (4, serialized_holder_selected_reserve, option),
7321 (5, self.context.config, required),
7322 (6, serialized_holder_htlc_max_in_flight, option),
7323 (7, self.context.shutdown_scriptpubkey, option),
7324 (8, self.context.blocked_monitor_updates, optional_vec),
7325 (9, self.context.target_closing_feerate_sats_per_kw, option),
7326 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7327 (13, self.context.channel_creation_height, required),
7328 (15, preimages, required_vec),
7329 (17, self.context.announcement_sigs_state, required),
7330 (19, self.context.latest_inbound_scid_alias, option),
7331 (21, self.context.outbound_scid_alias, required),
7332 (23, channel_ready_event_emitted, option),
7333 (25, user_id_high_opt, option),
7334 (27, self.context.channel_keys_id, required),
7335 (28, holder_max_accepted_htlcs, option),
7336 (29, self.context.temporary_channel_id, option),
7337 (31, channel_pending_event_emitted, option),
7338 (35, pending_outbound_skimmed_fees, optional_vec),
7339 (37, holding_cell_skimmed_fees, optional_vec),
7340 (38, self.context.is_batch_funding, option),
7341 (39, pending_outbound_blinding_points, optional_vec),
7342 (41, holding_cell_blinding_points, optional_vec),
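// Illustration-only sketch (never compiled) of the even/odd TLV type rule the
// entries above rely on: readers skip unknown odd types ("it's OK to be odd")
// but must reject unknown even types, which is how new required fields force
// old versions to fail deserialization instead of misreading the channel.
#[cfg(any())]
fn on_unknown_tlv_type(tlv_type: u64) -> Result<(), DecodeError> {
	if tlv_type % 2 == 1 {
		Ok(()) // odd: optional, silently skipped
	} else {
		Err(DecodeError::UnknownRequiredFeature) // even: required, abort
	}
}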
7349 const MAX_ALLOC_SIZE: usize = 64*1024;
7350 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7352 ES::Target: EntropySource,
7353 SP::Target: SignerProvider
7355 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7356 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7357 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7359 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7360 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7361 // the low bytes now and the high bytes later.
7362 let user_id_low: u64 = Readable::read(reader)?;
7364 let mut config = Some(LegacyChannelConfig::default());
7366 // Read the old serialization of the ChannelConfig from version 0.0.98.
7367 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7368 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7369 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7370 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7372 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7373 let mut _val: u64 = Readable::read(reader)?;
7376 let channel_id = Readable::read(reader)?;
7377 let channel_state = Readable::read(reader)?;
7378 let channel_value_satoshis = Readable::read(reader)?;
7380 let latest_monitor_update_id = Readable::read(reader)?;
7382 let mut keys_data = None;
7384 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7385 // the `channel_keys_id` TLV is present below.
7386 let keys_len: u32 = Readable::read(reader)?;
7387 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7388 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7389 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7390 let mut data = [0; 1024];
7391 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7392 reader.read_exact(read_slice)?;
7393 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
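// `keys_len` comes from disk and may be corrupted, so the capacity hint is
// capped at MAX_ALLOC_SIZE (64 KiB) and at most 1 KiB is read per iteration; a
// bogus multi-GiB length then fails with a read error once the stream runs dry
// rather than triggering a huge up-front allocation.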
7397 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7398 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7399 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7402 let destination_script = Readable::read(reader)?;
7404 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7405 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7406 let value_to_self_msat = Readable::read(reader)?;
7408 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7410 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7411 for _ in 0..pending_inbound_htlc_count {
7412 pending_inbound_htlcs.push(InboundHTLCOutput {
7413 htlc_id: Readable::read(reader)?,
7414 amount_msat: Readable::read(reader)?,
7415 cltv_expiry: Readable::read(reader)?,
7416 payment_hash: Readable::read(reader)?,
7417 state: match <u8 as Readable>::read(reader)? {
7418 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7419 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7420 3 => InboundHTLCState::Committed,
7421 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7422 _ => return Err(DecodeError::InvalidValue),
7427 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7428 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7429 for _ in 0..pending_outbound_htlc_count {
7430 pending_outbound_htlcs.push(OutboundHTLCOutput {
7431 htlc_id: Readable::read(reader)?,
7432 amount_msat: Readable::read(reader)?,
7433 cltv_expiry: Readable::read(reader)?,
7434 payment_hash: Readable::read(reader)?,
7435 source: Readable::read(reader)?,
7436 state: match <u8 as Readable>::read(reader)? {
7437 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7438 1 => OutboundHTLCState::Committed,
7440 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7441 OutboundHTLCState::RemoteRemoved(option.into())
7444 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7445 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7448 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7449 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7451 _ => return Err(DecodeError::InvalidValue),
7453 skimmed_fee_msat: None,
7454 blinding_point: None,
7458 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7459 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7460 for _ in 0..holding_cell_htlc_update_count {
7461 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7462 0 => HTLCUpdateAwaitingACK::AddHTLC {
7463 amount_msat: Readable::read(reader)?,
7464 cltv_expiry: Readable::read(reader)?,
7465 payment_hash: Readable::read(reader)?,
7466 source: Readable::read(reader)?,
7467 onion_routing_packet: Readable::read(reader)?,
7468 skimmed_fee_msat: None,
7469 blinding_point: None,
7471 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7472 payment_preimage: Readable::read(reader)?,
7473 htlc_id: Readable::read(reader)?,
7475 2 => HTLCUpdateAwaitingACK::FailHTLC {
7476 htlc_id: Readable::read(reader)?,
7477 err_packet: Readable::read(reader)?,
7479 _ => return Err(DecodeError::InvalidValue),
7483 let resend_order = match <u8 as Readable>::read(reader)? {
7484 0 => RAACommitmentOrder::CommitmentFirst,
7485 1 => RAACommitmentOrder::RevokeAndACKFirst,
7486 _ => return Err(DecodeError::InvalidValue),
7489 let monitor_pending_channel_ready = Readable::read(reader)?;
7490 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7491 let monitor_pending_commitment_signed = Readable::read(reader)?;
7493 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7494 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7495 for _ in 0..monitor_pending_forwards_count {
7496 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7499 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7500 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7501 for _ in 0..monitor_pending_failures_count {
7502 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7505 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7507 let holding_cell_update_fee = Readable::read(reader)?;
7509 let next_holder_htlc_id = Readable::read(reader)?;
7510 let next_counterparty_htlc_id = Readable::read(reader)?;
7511 let update_time_counter = Readable::read(reader)?;
7512 let feerate_per_kw = Readable::read(reader)?;
7514 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7515 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7516 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7517 // consider the stale state on reload.
7518 match <u8 as Readable>::read(reader)? {
7521 let _: u32 = Readable::read(reader)?;
7522 let _: u64 = Readable::read(reader)?;
7523 let _: Signature = Readable::read(reader)?;
7525 _ => return Err(DecodeError::InvalidValue),
7528 let funding_tx_confirmed_in = Readable::read(reader)?;
7529 let funding_tx_confirmation_height = Readable::read(reader)?;
7530 let short_channel_id = Readable::read(reader)?;
7532 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7533 let holder_dust_limit_satoshis = Readable::read(reader)?;
7534 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7535 let mut counterparty_selected_channel_reserve_satoshis = None;
7537 // Read the old serialization from version 0.0.98.
7538 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7540 // Read the 8 bytes of backwards-compatibility data.
7541 let _dummy: u64 = Readable::read(reader)?;
7543 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7544 let holder_htlc_minimum_msat = Readable::read(reader)?;
7545 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7547 let mut minimum_depth = None;
7549 // Read the old serialization from version 0.0.98.
7550 minimum_depth = Some(Readable::read(reader)?);
7552 // Read the 4 bytes of backwards-compatibility data.
7553 let _dummy: u32 = Readable::read(reader)?;
7556 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7558 1 => Some(CounterpartyForwardingInfo {
7559 fee_base_msat: Readable::read(reader)?,
7560 fee_proportional_millionths: Readable::read(reader)?,
7561 cltv_expiry_delta: Readable::read(reader)?,
7563 _ => return Err(DecodeError::InvalidValue),
7566 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7567 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7569 let counterparty_cur_commitment_point = Readable::read(reader)?;
7571 let counterparty_prev_commitment_point = Readable::read(reader)?;
7572 let counterparty_node_id = Readable::read(reader)?;
7574 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7575 let commitment_secrets = Readable::read(reader)?;
7577 let channel_update_status = Readable::read(reader)?;
7579 #[cfg(any(test, fuzzing))]
7580 let mut historical_inbound_htlc_fulfills = HashSet::new();
7581 #[cfg(any(test, fuzzing))]
7583 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7584 for _ in 0..htlc_fulfills_len {
7585 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7589 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7590 Some((feerate, if channel_parameters.is_outbound_from_holder {
7591 FeeUpdateState::Outbound
7593 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7599 let mut announcement_sigs = None;
7600 let mut target_closing_feerate_sats_per_kw = None;
7601 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7602 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7603 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7604 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7605 // only, so we default to that if none was written.
7606 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7607 let mut channel_creation_height = Some(serialized_height);
7608 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7610 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7611 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7612 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7613 let mut latest_inbound_scid_alias = None;
7614 let mut outbound_scid_alias = None;
7615 let mut channel_pending_event_emitted = None;
7616 let mut channel_ready_event_emitted = None;
7618 let mut user_id_high_opt: Option<u64> = None;
7619 let mut channel_keys_id: Option<[u8; 32]> = None;
7620 let mut temporary_channel_id: Option<ChannelId> = None;
7621 let mut holder_max_accepted_htlcs: Option<u16> = None;
7623 let mut blocked_monitor_updates = Some(Vec::new());
7625 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7626 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7628 let mut is_batch_funding: Option<()> = None;
7630 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7631 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7633 read_tlv_fields!(reader, {
7634 (0, announcement_sigs, option),
7635 (1, minimum_depth, option),
7636 (2, channel_type, option),
7637 (3, counterparty_selected_channel_reserve_satoshis, option),
7638 (4, holder_selected_channel_reserve_satoshis, option),
7639 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7640 (6, holder_max_htlc_value_in_flight_msat, option),
7641 (7, shutdown_scriptpubkey, option),
7642 (8, blocked_monitor_updates, optional_vec),
7643 (9, target_closing_feerate_sats_per_kw, option),
7644 (11, monitor_pending_finalized_fulfills, optional_vec),
7645 (13, channel_creation_height, option),
7646 (15, preimages_opt, optional_vec),
7647 (17, announcement_sigs_state, option),
7648 (19, latest_inbound_scid_alias, option),
7649 (21, outbound_scid_alias, option),
7650 (23, channel_ready_event_emitted, option),
7651 (25, user_id_high_opt, option),
7652 (27, channel_keys_id, option),
7653 (28, holder_max_accepted_htlcs, option),
7654 (29, temporary_channel_id, option),
7655 (31, channel_pending_event_emitted, option),
7656 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7657 (37, holding_cell_skimmed_fees_opt, optional_vec),
7658 (38, is_batch_funding, option),
7659 (39, pending_outbound_blinding_points_opt, optional_vec),
7660 (41, holding_cell_blinding_points_opt, optional_vec),
7663 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7664 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7665 // If we've gotten to the funding stage of the channel, populate the signer with its
7666 // required channel parameters.
7667 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7668 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7669 holder_signer.provide_channel_parameters(&channel_parameters);
7671 (channel_keys_id, holder_signer)
7673 // `keys_data` can be `None` if we had corrupted data.
7674 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7675 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7676 (holder_signer.channel_keys_id(), holder_signer)
7679 if let Some(preimages) = preimages_opt {
7680 let mut iter = preimages.into_iter();
7681 for htlc in pending_outbound_htlcs.iter_mut() {
7683 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7684 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7686 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7687 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7692 // We expect all preimages to be consumed above
7693 if iter.next().is_some() {
7694 return Err(DecodeError::InvalidValue);
7698 let chan_features = channel_type.as_ref().unwrap();
7699 if !chan_features.is_subset(our_supported_features) {
7700 // If the channel was written by a new version and negotiated with features we don't
7701 // understand yet, refuse to read it.
7702 return Err(DecodeError::UnknownRequiredFeature);
7705 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7706 // To account for that, we're proactively setting/overriding the field here.
7707 channel_parameters.channel_type_features = chan_features.clone();
7709 let mut secp_ctx = Secp256k1::new();
7710 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7712 // `user_id` used to be a single u64 value. In order to remain backwards
7713 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7714 // separate u64 values.
7715 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7717 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7719 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7720 let mut iter = skimmed_fees.into_iter();
7721 for htlc in pending_outbound_htlcs.iter_mut() {
7722 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7724 // We expect all skimmed fees to be consumed above
7725 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7727 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7728 let mut iter = skimmed_fees.into_iter();
7729 for htlc in holding_cell_htlc_updates.iter_mut() {
7730 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7731 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7734 // We expect all skimmed fees to be consumed above
7735 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7737 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
7738 let mut iter = blinding_pts.into_iter();
7739 for htlc in pending_outbound_htlcs.iter_mut() {
7740 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7742 // We expect all blinding points to be consumed above
7743 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7745 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
7746 let mut iter = blinding_pts.into_iter();
7747 for htlc in holding_cell_htlc_updates.iter_mut() {
7748 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
7749 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7752 // We expect all blinding points to be consumed above
7753 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
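// Each of the optional vecs above must line up one-to-one with the HTLC list it
// annotates: a too-short vec fails via `ok_or(DecodeError::InvalidValue)` and a
// too-long one via the leftover-element checks, so corruption cannot silently
// shift values between HTLCs.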
7757 context: ChannelContext {
7760 config: config.unwrap(),
7764 // Note that we don't care about serializing handshake limits as we only ever serialize
7765 // channel data after the handshake has completed.
7766 inbound_handshake_limits_override: None,
7769 temporary_channel_id,
7771 announcement_sigs_state: announcement_sigs_state.unwrap(),
7773 channel_value_satoshis,
7775 latest_monitor_update_id,
7777 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7778 shutdown_scriptpubkey,
7781 cur_holder_commitment_transaction_number,
7782 cur_counterparty_commitment_transaction_number,
7785 holder_max_accepted_htlcs,
7786 pending_inbound_htlcs,
7787 pending_outbound_htlcs,
7788 holding_cell_htlc_updates,
7792 monitor_pending_channel_ready,
7793 monitor_pending_revoke_and_ack,
7794 monitor_pending_commitment_signed,
7795 monitor_pending_forwards,
7796 monitor_pending_failures,
7797 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7799 signer_pending_commitment_update: false,
7800 signer_pending_funding: false,
7803 holding_cell_update_fee,
7804 next_holder_htlc_id,
7805 next_counterparty_htlc_id,
7806 update_time_counter,
7809 #[cfg(debug_assertions)]
7810 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7811 #[cfg(debug_assertions)]
7812 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7814 last_sent_closing_fee: None,
7815 pending_counterparty_closing_signed: None,
7816 expecting_peer_commitment_signed: false,
7817 closing_fee_limits: None,
7818 target_closing_feerate_sats_per_kw,
7820 funding_tx_confirmed_in,
7821 funding_tx_confirmation_height,
7823 channel_creation_height: channel_creation_height.unwrap(),
7825 counterparty_dust_limit_satoshis,
7826 holder_dust_limit_satoshis,
7827 counterparty_max_htlc_value_in_flight_msat,
7828 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7829 counterparty_selected_channel_reserve_satoshis,
7830 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7831 counterparty_htlc_minimum_msat,
7832 holder_htlc_minimum_msat,
7833 counterparty_max_accepted_htlcs,
7836 counterparty_forwarding_info,
7838 channel_transaction_parameters: channel_parameters,
7839 funding_transaction,
7842 counterparty_cur_commitment_point,
7843 counterparty_prev_commitment_point,
7844 counterparty_node_id,
7846 counterparty_shutdown_scriptpubkey,
7850 channel_update_status,
7851 closing_signed_in_flight: false,
7855 #[cfg(any(test, fuzzing))]
7856 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7857 #[cfg(any(test, fuzzing))]
7858 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7860 workaround_lnd_bug_4006: None,
7861 sent_message_awaiting_response: None,
7863 latest_inbound_scid_alias,
7864 // Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if it's missing
7865 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7867 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7868 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7870 #[cfg(any(test, fuzzing))]
7871 historical_inbound_htlc_fulfills,
7873 channel_type: channel_type.unwrap(),
7876 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7885 use bitcoin::blockdata::constants::ChainHash;
7886 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7887 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7888 use bitcoin::blockdata::opcodes;
7889 use bitcoin::network::constants::Network;
7890 use crate::ln::PaymentHash;
7891 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7892 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7893 use crate::ln::channel::InitFeatures;
7894 use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7895 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7896 use crate::ln::features::ChannelTypeFeatures;
7897 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7898 use crate::ln::script::ShutdownScript;
7899 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7900 use crate::chain::BestBlock;
7901 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7902 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7903 use crate::chain::transaction::OutPoint;
7904 use crate::routing::router::Path;
7905 use crate::util::config::UserConfig;
7906 use crate::util::errors::APIError;
7907 use crate::util::test_utils;
7908 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7909 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7910 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7911 use bitcoin::secp256k1::{SecretKey,PublicKey};
7912 use bitcoin::hashes::sha256::Hash as Sha256;
7913 use bitcoin::hashes::Hash;
7914 use bitcoin::hashes::hex::FromHex;
7915 use bitcoin::hash_types::WPubkeyHash;
7916 use bitcoin::blockdata::locktime::absolute::LockTime;
7917 use bitcoin::address::{WitnessProgram, WitnessVersion};
7918 use crate::prelude::*;
7920 struct TestFeeEstimator {
7923 impl FeeEstimator for TestFeeEstimator {
7924 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7930 fn test_max_funding_satoshis_no_wumbo() {
7931 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7932 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7933 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7937 signer: InMemorySigner,
7940 impl EntropySource for Keys {
7941 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7944 impl SignerProvider for Keys {
7945 type EcdsaSigner = InMemorySigner;
7947 type TaprootSigner = InMemorySigner;
7949 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7950 self.signer.channel_keys_id()
7953 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
7957 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
7959 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
7960 let secp_ctx = Secp256k1::signing_only();
7961 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7962 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7963 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
7966 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7967 let secp_ctx = Secp256k1::signing_only();
7968 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7969 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7973 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7974 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7975 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
7979 fn upfront_shutdown_script_incompatibility() {
7980 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7981 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
7982 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
7985 let seed = [42; 32];
7986 let network = Network::Testnet;
7987 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7988 keys_provider.expect(OnGetShutdownScriptpubkey {
7989 returns: non_v0_segwit_shutdown_script.clone(),
7992 let secp_ctx = Secp256k1::new();
7993 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7994 let config = UserConfig::default();
7995 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
7996 Err(APIError::IncompatibleShutdownScript { script }) => {
7997 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7999 Err(e) => panic!("Unexpected error: {:?}", e),
8000 Ok(_) => panic!("Expected error"),
8004 // Check that, during channel creation, we use the same feerate in the open channel message
8005 // as we do in the Channel object creation itself.
8007 fn test_open_channel_msg_fee() {
8008 let original_fee = 253;
8009 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8010 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8011 let secp_ctx = Secp256k1::new();
8012 let seed = [42; 32];
8013 let network = Network::Testnet;
8014 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8016 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8017 let config = UserConfig::default();
8018 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8020 // Now change the fee so we can check that the fee in the open_channel message is the
8021 // same as the old fee.
8022 fee_est.fee_est = 500;
8023 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8024 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8028 fn test_holder_vs_counterparty_dust_limit() {
8029 // Test that when calculating the local and remote commitment transaction fees, the correct
8030 // dust limits are used.
8031 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8032 let secp_ctx = Secp256k1::new();
8033 let seed = [42; 32];
8034 let network = Network::Testnet;
8035 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8036 let logger = test_utils::TestLogger::new();
8037 let best_block = BestBlock::from_network(network);
8039 // Go through the flow of opening a channel between two nodes, making sure
8040 // they have different dust limits.
8042 // Create Node A's channel pointing to Node B's pubkey
8043 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8044 let config = UserConfig::default();
8045 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8047 // Create Node B's channel by receiving Node A's open_channel message
8048 // Make sure A's dust limit is as we expect.
8049 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8050 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8051 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8053 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8054 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8055 accept_channel_msg.dust_limit_satoshis = 546;
8056 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8057 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8059 // Node A --> Node B: funding created
8060 let output_script = node_a_chan.context.get_funding_redeemscript();
8061 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8062 value: 10000000, script_pubkey: output_script.clone(),
8064 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8065 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8066 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8068 // Node B --> Node A: funding signed
8069 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8071 // Put some inbound and outbound HTLCs in A's channel.
8072 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8073 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8075 amount_msat: htlc_amount_msat,
8076 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8077 cltv_expiry: 300000000,
8078 state: InboundHTLCState::Committed,
8081 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8083 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8084 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8085 cltv_expiry: 200000000,
8086 state: OutboundHTLCState::Committed,
8087 source: HTLCSource::OutboundRoute {
8088 path: Path { hops: Vec::new(), blinded_tail: None },
8089 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8090 first_hop_htlc_msat: 548,
8091 payment_id: PaymentId([42; 32]),
8093 skimmed_fee_msat: None,
8094 blinding_point: None,
8097 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8098 // the dust limit check.
8099 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8100 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8101 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8102 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8104 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8105 // of the HTLCs are seen to be above the dust limit.
8106 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8107 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8108 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8109 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8110 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8114 fn test_timeout_vs_success_htlc_dust_limit() {
8115 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8116 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8117 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8118 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8119 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8120 let secp_ctx = Secp256k1::new();
8121 let seed = [42; 32];
8122 let network = Network::Testnet;
8123 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8125 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8126 let config = UserConfig::default();
8127 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8129 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8130 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8132 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8133 // counted as dust when it shouldn't be.
8134 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8135 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8136 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8137 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8139 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8140 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8141 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8142 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8143 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8145 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8147 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8148 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8149 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8150 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8151 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8153 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8154 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8155 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8156 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8157 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
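// Illustration-only sketch (never compiled) of the dust thresholds these cases
// probe: an offered HTLC is weighed against the HTLC-timeout transaction fee and
// a received HTLC against the HTLC-success transaction fee, each added on top of
// the relevant dust limit.
#[cfg(any())]
fn dust_threshold_sat(feerate_per_kw: u64, htlc_tx_weight: u64, dust_limit_sat: u64) -> u64 {
	dust_limit_sat + feerate_per_kw * htlc_tx_weight / 1000
}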
8161 fn channel_reestablish_no_updates() {
8162 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8163 let logger = test_utils::TestLogger::new();
8164 let secp_ctx = Secp256k1::new();
8165 let seed = [42; 32];
8166 let network = Network::Testnet;
8167 let best_block = BestBlock::from_network(network);
8168 let chain_hash = ChainHash::using_genesis_block(network);
8169 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8171 // Go through the flow of opening a channel between two nodes.
8173 // Create Node A's channel pointing to Node B's pubkey
8174 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8175 let config = UserConfig::default();
8176 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8178 // Create Node B's channel by receiving Node A's open_channel message
8179 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8180 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8181 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8183 // Node B --> Node A: accept channel
8184 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8185 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8187 // Node A --> Node B: funding created
8188 let output_script = node_a_chan.context.get_funding_redeemscript();
8189 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8190 value: 10000000, script_pubkey: output_script.clone(),
8192 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8193 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8194 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8196 // Node B --> Node A: funding signed
8197 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8199 // Now disconnect the two nodes and check that the commitment point in
8200 // Node B's channel_reestablish message is sane.
8201 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8202 let msg = node_b_chan.get_channel_reestablish(&&logger);
8203 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8204 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8205 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8207 // Check that the commitment point in Node A's channel_reestablish message
8209 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8210 let msg = node_a_chan.get_channel_reestablish(&&logger);
8211 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8212 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8213 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8217 fn test_configured_holder_max_htlc_value_in_flight() {
8218 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8219 let logger = test_utils::TestLogger::new();
8220 let secp_ctx = Secp256k1::new();
8221 let seed = [42; 32];
8222 let network = Network::Testnet;
8223 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8224 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8225 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8227 let mut config_2_percent = UserConfig::default();
8228 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8229 let mut config_99_percent = UserConfig::default();
8230 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8231 let mut config_0_percent = UserConfig::default();
8232 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8233 let mut config_101_percent = UserConfig::default();
8234 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8236 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8237 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8238 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8239 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8240 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8241 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
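// Illustrative arithmetic (not additional coverage): a 10_000_000 sat channel is
// 10_000_000_000 msat, so the 2% configuration above yields an in-flight cap of
// 200_000_000 msat.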
8243 // Test with the upper bound - 1 of valid values (99%).
8244 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8245 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8246 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8248 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8250 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8251 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
8253 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8254 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8255 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8257 // Test with the upper bound - 1 of valid values (99%).
8258 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8259 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8260 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8262 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8263 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8264 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8265 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8266 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8268 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
// than 100.
8271 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8272 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8273 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
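// Note: as the 0% and 101% cases here and below show, out-of-range percentages
// are clamped into [1, 100] rather than rejected outright.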
8275 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8276 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8277 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8278 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8279 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8281 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
// than 100.
8284 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8285 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8286 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}

#[test]
8290 fn test_configured_holder_selected_channel_reserve_satoshis() {
8292 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8293 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8294 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8296 // Test with valid but unreasonably high channel reserves
// The requesting and accepting parties ask for 49%-49% and 60%-30% channel reserves respectively.
8298 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8299 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8301 // Test with calculated channel reserve less than lower bound
8302 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8303 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
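// (Illustrative: 100_000 sat * 0.00002 is only 2 sat, so the reserve is floored
// at MIN_THEIR_CHAN_RESERVE_SATOSHIS.)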
// Test with invalid channel reserves since the sum of both is greater than or equal
// to the channel value.
8307 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8308 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
}
8311 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8312 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8313 let logger = test_utils::TestLogger::new();
8314 let secp_ctx = Secp256k1::new();
8315 let seed = [42; 32];
8316 let network = Network::Testnet;
8317 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8318 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8319 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8322 let mut outbound_node_config = UserConfig::default();
8323 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8324 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8326 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8327 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
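// Illustrative arithmetic: with a 10_000_000 sat channel and a 2% selection this
// expects max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, 200_000) sat as the reserve.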
8329 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8330 let mut inbound_node_config = UserConfig::default();
8331 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8333 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8334 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8336 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8338 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8339 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
// Channel negotiation fails since the requested reserves meet or exceed the channel value.
8342 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8343 assert!(result.is_err());
}
}

#[test]
8348 fn channel_update() {
8349 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8350 let logger = test_utils::TestLogger::new();
8351 let secp_ctx = Secp256k1::new();
8352 let seed = [42; 32];
8353 let network = Network::Testnet;
8354 let best_block = BestBlock::from_network(network);
8355 let chain_hash = ChainHash::using_genesis_block(network);
8356 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8358 // Create Node A's channel pointing to Node B's pubkey
8359 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8360 let config = UserConfig::default();
8361 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8363 // Create Node B's channel by receiving Node A's open_channel message
8364 // Make sure A's dust limit is as we expect.
8365 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8366 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8367 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8369 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8370 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8371 accept_channel_msg.dust_limit_satoshis = 546;
8372 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8373 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8375 // Node A --> Node B: funding created
8376 let output_script = node_a_chan.context.get_funding_redeemscript();
8377 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8378 value: 10000000, script_pubkey: output_script.clone(),
}]};
8380 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8381 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8382 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8384 // Node B --> Node A: funding signed
8385 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8387 // Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
	contents: UnsignedChannelUpdate {
		chain_hash,
		short_channel_id: 0,
		timestamp: 0,
		flags: 0,
		cltv_expiry_delta: 100,
		htlc_minimum_msat: 5,
		htlc_maximum_msat: MAX_VALUE_MSAT,
		fee_base_msat: 110,
		fee_proportional_millionths: 11,
		excess_data: Vec::new(),
	},
	signature: Signature::from(unsafe { FFISignature::new() })
};
8403 assert!(node_a_chan.channel_update(&update).unwrap());
8405 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8406 // change our official htlc_minimum_msat.
8407 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
	Some(info) => {
		assert_eq!(info.cltv_expiry_delta, 100);
		assert_eq!(info.fee_base_msat, 110);
		assert_eq!(info.fee_proportional_millionths, 11);
	},
	None => panic!("expected counterparty forwarding info to be Some")
}
8417 assert!(!node_a_chan.channel_update(&update).unwrap());
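// `channel_update` returns Ok(true) only when the counterparty's forwarding info
// actually changed, so re-applying the identical update above returns Ok(false).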
}

#[cfg(feature = "_test_vectors")]
#[test]
8422 fn outbound_commitment_test() {
8423 use bitcoin::sighash;
8424 use bitcoin::consensus::encode::serialize;
8425 use bitcoin::sighash::EcdsaSighashType;
8426 use bitcoin::hashes::hex::FromHex;
8427 use bitcoin::hash_types::Txid;
8428 use bitcoin::secp256k1::Message;
8429 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8430 use crate::ln::PaymentPreimage;
8431 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8432 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8433 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8434 use crate::util::logger::Logger;
8435 use crate::sync::Arc;
8436 use core::str::FromStr;
8437 use hex::DisplayHex;
8439 // Test vectors from BOLT 3 Appendices C and F (anchors):
8440 let feeest = TestFeeEstimator{fee_est: 15000};
8441 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8442 let secp_ctx = Secp256k1::new();
8444 let mut signer = InMemorySigner::new(
&secp_ctx,
8446 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8447 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8448 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8449 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8450 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8452 // These aren't set in the test vectors:
8453 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10_000_000,
[0; 32],
[0; 32],
);
8459 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8460 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8461 let keys_provider = Keys { signer: signer.clone() };
8463 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8464 let mut config = UserConfig::default();
8465 config.channel_handshake_config.announced_channel = false;
8466 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8467 chan.context.holder_dust_limit_satoshis = 546;
chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in during accept_channel
8470 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8472 let counterparty_pubkeys = ChannelPublicKeys {
8473 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8474 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8475 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8476 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
8477 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
8479 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8480 CounterpartyChannelTransactionParameters {
8481 pubkeys: counterparty_pubkeys.clone(),
8482 selected_contest_delay: 144
});
8484 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8485 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8487 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8488 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8490 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8491 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8493 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8494 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8496 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8497 // derived from a commitment_seed, so instead we copy it here and call
8498 // build_commitment_transaction.
8499 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8500 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8501 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8502 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8503 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
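// `keys` now holds the per-commitment transaction key set (revocation key, both
// parties' HTLC keys and the broadcaster's delayed-payment key) assumed by the
// BOLT 3 vectors below.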
8505 macro_rules! test_commitment {
8506 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8507 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8508 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
};
}
8512 macro_rules! test_commitment_with_anchors {
8513 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8514 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8515 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
};
}
8519 macro_rules! test_commitment_common {
8520 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8521 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
8523 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8524 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8526 let htlcs = commitment_stats.htlcs_included.drain(..)
8527 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
.collect();
8529 (commitment_stats.tx, htlcs)
};
8531 let trusted_tx = commitment_tx.trust();
8532 let unsigned_tx = trusted_tx.built_transaction();
8533 let redeemscript = chan.context.get_funding_redeemscript();
8534 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8535 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8536 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8537 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8539 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8540 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8541 let mut counterparty_htlc_sigs = Vec::new();
8542 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
$({
8544 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8545 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8546 counterparty_htlc_sigs.push(remote_signature);
})*
8548 assert_eq!(htlcs.len(), per_htlc.len());
8550 let holder_commitment_tx = HolderCommitmentTransaction::new(
8551 commitment_tx.clone(),
8552 counterparty_signature,
8553 counterparty_htlc_sigs,
8554 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8555 chan.context.counterparty_funding_pubkey()
);
8557 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8558 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8560 let funding_redeemscript = chan.context.get_funding_redeemscript();
8561 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8562 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8564 // ((htlc, counterparty_sig), (index, holder_sig))
8565 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
$({
8568 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8569 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8571 let ref htlc = htlcs[$htlc_idx];
8572 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8573 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8574 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8575 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8576 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8577 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8578 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
8580 let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
	for i in 0..5 {
8583 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
8584 if out == htlc.payment_hash {
8585 preimage = Some(PaymentPreimage([i; 32]));
}
}
8589 assert!(preimage.is_some());
}
8592 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8593 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8594 channel_derivation_parameters: ChannelDerivationParameters {
8595 value_satoshis: chan.context.channel_value_satoshis,
8596 keys_id: chan.context.channel_keys_id,
8597 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
},
8599 commitment_txid: trusted_tx.txid(),
8600 per_commitment_number: trusted_tx.commitment_number(),
8601 per_commitment_point: trusted_tx.per_commitment_point(),
8602 feerate_per_kw: trusted_tx.feerate_per_kw(),
htlc: htlc.clone(),
8604 preimage: preimage.clone(),
8605 counterparty_sig: *htlc_counterparty_sig,
8606 }, &secp_ctx).unwrap();
8607 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8608 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8610 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
8611 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8612 let trusted_tx = holder_commitment_tx.trust();
8613 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8614 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
8615 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
})*
assert!(htlc_counterparty_sig_iter.next().is_none());
} }
}
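// In short, each test_commitment!/test_commitment_with_anchors! invocation below
// rebuilds the commitment transaction from the channel state, verifies the
// vector's counterparty signature, checks our holder signature plus the
// fully-signed serialization, and repeats those checks for every HTLC transaction.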
8621 // anchors: simple commitment tx with no HTLCs and single anchor
8622 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8623 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8624 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8626 // simple commitment tx with no HTLCs
8627 chan.context.value_to_self_msat = 7000000000;
8629 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8630 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8631 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8633 // anchors: simple commitment tx with no HTLCs
8634 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8635 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8636 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8638 chan.context.pending_inbound_htlcs.push({
8639 let mut out = InboundHTLCOutput{
htlc_id: 0,
8641 amount_msat: 1000000,
cltv_expiry: 500,
8643 payment_hash: PaymentHash([0; 32]),
8644 state: InboundHTLCState::Committed,
};
8646 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
out
});
8649 chan.context.pending_inbound_htlcs.push({
8650 let mut out = InboundHTLCOutput{
htlc_id: 1,
8652 amount_msat: 2000000,
cltv_expiry: 501,
8654 payment_hash: PaymentHash([0; 32]),
8655 state: InboundHTLCState::Committed,
};
8657 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
out
});
8660 chan.context.pending_outbound_htlcs.push({
8661 let mut out = OutboundHTLCOutput{
htlc_id: 2,
8663 amount_msat: 2000000,
cltv_expiry: 502,
8665 payment_hash: PaymentHash([0; 32]),
8666 state: OutboundHTLCState::Committed,
8667 source: HTLCSource::dummy(),
8668 skimmed_fee_msat: None,
8669 blinding_point: None,
};
8671 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
out
});
8674 chan.context.pending_outbound_htlcs.push({
8675 let mut out = OutboundHTLCOutput{
htlc_id: 3,
8677 amount_msat: 3000000,
cltv_expiry: 503,
8679 payment_hash: PaymentHash([0; 32]),
8680 state: OutboundHTLCState::Committed,
8681 source: HTLCSource::dummy(),
8682 skimmed_fee_msat: None,
8683 blinding_point: None,
};
8685 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
out
});
8688 chan.context.pending_inbound_htlcs.push({
8689 let mut out = InboundHTLCOutput{
htlc_id: 4,
8691 amount_msat: 4000000,
cltv_expiry: 504,
8693 payment_hash: PaymentHash([0; 32]),
8694 state: InboundHTLCState::Committed,
};
8696 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
out
});
8700 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8701 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8702 chan.context.feerate_per_kw = 0;
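// The five pending HTLCs pushed above mirror the BOLT 3 Appendix C HTLC set;
// their preimages are the 32-byte repetitions of 0x00 through 0x04, which is
// what the preimage search in test_commitment_common relies on.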
8704 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8705 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8706 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8709 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8710 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8711 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8714 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8715 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8716 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8719 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8720 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8721 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8724 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8725 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8726 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8729 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8730 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8731 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8734 // commitment tx with seven outputs untrimmed (maximum feerate)
8735 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8736 chan.context.feerate_per_kw = 647;
8738 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8739 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8740 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8743 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8744 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8745 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8748 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8749 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8750 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8753 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8754 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8755 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8758 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8759 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8760 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8763 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8764 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8765 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8768 // commitment tx with six outputs untrimmed (minimum feerate)
8769 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8770 chan.context.feerate_per_kw = 648;
8772 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8773 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8774 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8777 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8778 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8779 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8782 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8783 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8784 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8787 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8788 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8789 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8792 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8793 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8794 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8797 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8798 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8799 chan.context.feerate_per_kw = 645;
8800 chan.context.holder_dust_limit_satoshis = 1001;
8802 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8803 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8804 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8807 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8808 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8809 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8812 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8813 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8814 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8817 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8818 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8819 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8822 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8823 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8824 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
	// commitment tx with six outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 2069;
	chan.context.holder_dust_limit_satoshis = 546;
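	// Whether an HTLC output appears below depends on both knobs above: per BOLT 3, an
	// HTLC is trimmed from the commitment tx when its value, minus the fee of its
	// HTLC-success/timeout claim tx at the current feerate, falls under the dust limit.
	// The feerates in these cases are chosen to sit exactly on those trim boundaries.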
	test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
		"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
		"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
		"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		{ 1,
		"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
		"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
		"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		{ 2,
		"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
		"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
		"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 3,
		"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
		"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
		"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// commitment tx with five outputs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 2070;
	test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
		"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
		"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
		"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		{ 1,
		"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
		"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
		"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 2,
		"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
		"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
		"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// commitment tx with five outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 2194;
	test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
		"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
		"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
		"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		{ 1,
		"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
		"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
		"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 2,
		"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
		"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
		"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// commitment tx with four outputs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 2195;
	test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
		"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
		"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
		"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 1,
		"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
		"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
		"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 2185;
	chan.context.holder_dust_limit_satoshis = 2001;
	let cached_channel_type = chan.context.channel_type.clone();
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
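	// With anchor channels (option_anchors_zero_fee_htlc_tx), the counterparty's HTLC
	// signatures commit to SIGHASH_SINGLE|SIGHASH_ANYONECANPAY -- visible as a trailing
	// 0x83 sighash byte in the witness hex below, where the non-anchor cases carry 0x01
	// (SIGHASH_ALL) -- so fees can be attached to HTLC claim txs after the fact.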
	test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
		"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
		"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
		"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

		{ 1,
		"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
		"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
		"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
	});
	// commitment tx with four outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 3702;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type.clone();
	test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
		"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
		"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
		"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 1,
		"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
		"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
		"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// commitment tx with three outputs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 3703;
	test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
		"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
		"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
		"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 3687;
	chan.context.holder_dust_limit_satoshis = 3001;
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
		"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
		"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
		"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
	});
	// commitment tx with three outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 4914;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type.clone();
	test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
		"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
		"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
		"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	});
	// commitment tx with two outputs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 4915;
	chan.context.holder_dust_limit_satoshis = 546;
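	// At 4915 sat/kw the remaining HTLC's value no longer covers its claim-tx fee plus
	// the dust limit, so it is trimmed and only to_local/to_remote survive (compare the
	// three-output case at 4914 sat/kw directly above).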
	test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
		"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 4894;
	chan.context.holder_dust_limit_satoshis = 4001;
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
		"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with two outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651180;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type.clone();
	test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
		"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with one output untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651181;
	test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// anchors: commitment tx with one output untrimmed (minimum dust limit)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 6216010;
	chan.context.holder_dust_limit_satoshis = 4001;
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with fee greater than funder amount
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651936;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type;
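	// The funder cannot owe more in commitment fees than its own balance: with the fee
	// capped at the funder's amount, to_local disappears, and the expected signatures are
	// identical to the one-output minimum-feerate case above.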
	test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
	chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
	chan.context.feerate_per_kw = 253;
	chan.context.pending_inbound_htlcs.clear();
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 1,
			amount_msat: 2000000,
			cltv_expiry: 501,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.clear();
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 6,
			amount_msat: 5000001,
			cltv_expiry: 506,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 5,
			amount_msat: 5000000,
			cltv_expiry: 505,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
		out
	});
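	// The two offered HTLCs share an amount and payment hash, making their commitment
	// outputs byte-identical; per BOLT 3 the output-ordering tie is then broken by
	// ascending cltv_expiry, which is exactly what this vector exercises.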
	test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
		"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		{ 1,
		"3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
		"3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },

		{ 2,
		"30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
		"304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
	});
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		{ 1,
		"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

		{ 2,
		"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
	});
}

#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:
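	// Per-commitment secrets form a "shachain": starting from the seed, for each bit B
	// from 47 down to 0 that is set in the index, flip bit B of the running value and
	// SHA256 the result. This compact construction lets a node store O(log N) revoked
	// secrets and re-derive every earlier-revealed one on demand.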
	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}

#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
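	// Per BOLT 3: localpubkey = basepoint + SHA256(per_commitment_point || basepoint)*G,
	// and the matching private key tweaks the basepoint secret by the same scalar. The
	// revocation key mixes contributions from both parties:
	//   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
	//                    + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
	// so its private key is only computable once the per-commitment secret is revealed.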
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
		           SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
		           <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
		           SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
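		// Note that the revocation *private* key checked above can only be computed by a
		// party holding both the revocation basepoint secret and the counterparty's
		// per_commitment_secret, i.e. only once the counterparty has revoked the
		// corresponding commitment. Per BOLT 3:
		//
		//   revocationprivkey = revocation_basepoint_secret * SHA256(revocation_basepoint || per_commitment_point)
		//                     + per_commitment_secret * SHA256(per_commitment_point || revocation_basepoint)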
	}

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
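		// For reference: (1 << 12) | (1 << 20) == 0x101000, and u64::to_le_bytes() lays
		// that out least-significant byte first, so the flag vector built above is
		// [0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00]: bit 12 lands in byte 1 and
		// bit 20 in byte 2, which is exactly the byte order `from_le_bytes` expects.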
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());
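		// Neither bit is *unknown* to LDK's feature machinery (hence the asserts above),
		// which is what this test relies on: the rejections below must come from LDK
		// explicitly refusing `option_anchors`, not from generic unknown-feature handling.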
		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;
		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(),
			funding_outpoint,
			true,
			&&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);
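		// `channel_state` is a bitmask: `FundingSent` is the base state, with
		// `WaitingForBatch` (and, below, `TheirChannelReady`) OR'd in as flags, so each
		// assert_eq! in this test pins the exact combination of flags the channel
		// should be in at that point.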
		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// ChannelState::WaitingForBatch is only cleared when ChannelManager calls set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
}