1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script,Builder};
12 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
13 use bitcoin::util::sighash;
14 use bitcoin::consensus::encode;
16 use bitcoin::hashes::Hash;
17 use bitcoin::hashes::sha256::Hash as Sha256;
18 use bitcoin::hashes::sha256d::Hash as Sha256d;
19 use bitcoin::hash_types::{Txid, BlockHash};
21 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
22 use bitcoin::secp256k1::{PublicKey,SecretKey};
23 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
24 use bitcoin::secp256k1;
26 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
27 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
29 use crate::ln::msgs::DecodeError;
30 use crate::ln::script::{self, ShutdownScript};
31 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
32 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
33 use crate::ln::chan_utils;
34 use crate::ln::onion_utils::HTLCFailReason;
35 use crate::chain::BestBlock;
36 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
37 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
38 use crate::chain::transaction::{OutPoint, TransactionData};
39 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
40 use crate::events::ClosureReason;
41 use crate::routing::gossip::NodeId;
42 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
43 use crate::util::logger::Logger;
44 use crate::util::errors::APIError;
45 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
46 use crate::util::scid_utils::scid_from_parts;
49 use crate::prelude::*;
50 use core::{cmp,mem,fmt};
52 #[cfg(any(test, fuzzing, debug_assertions))]
53 use crate::sync::Mutex;
54 use bitcoin::hashes::hex::ToHex;
55 use crate::sign::type_resolver::ChannelSignerType;
/// A snapshot of balance- and limit-related values for a channel.
/// All fields are in millisatoshis.
// NOTE(review): the struct's closing brace is not visible in this excerpt — likely truncated.
pub struct ChannelValueStat {
	// Our current balance, excluding pending HTLCs and fees.
	pub value_to_self_msat: u64,
	// The total value of the channel.
	pub channel_value_msat: u64,
	// The reserve we must maintain on our side of the channel.
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	// Value of outbound HTLCs currently sitting in the holding cell.
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
/// The set of balances describing what can currently be sent over the channel.
/// All values are expressed in millisatoshis.
// NOTE(review): the struct's closing brace is not visible in this excerpt — likely truncated.
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
// Tracks the lifecycle of an in-flight `update_fee`, mirroring the subset of
// `InboundHTLCState`/outbound states that apply to fee updates.
// NOTE(review): the enum declaration line and some variants are not visible in this
// excerpt — likely truncated.
#[derive(Debug, Clone, Copy, PartialEq)]
// Inbound states mirroring InboundHTLCState
AwaitingRemoteRevokeToAnnounce,
// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
// the fee update anywhere, we can simply consider the fee update `Committed` immediately
// instead of setting it to AwaitingAnnouncedRemoteRevoke.
// Outbound state can only be `LocalAnnounced` or `Committed`
/// The reason an inbound HTLC was removed from our commitment: either we failed it back
/// (relayed or malformed-onion failure) or we claimed it with the payment preimage.
enum InboundHTLCRemovalReason {
	// Failure to relay; carries the encrypted onion error to pass backwards.
	FailRelay(msgs::OnionErrorPacket),
	// Malformed onion; carries (sha256_of_onion, failure_code).
	FailMalformed(([u8; 32], u16)),
	// We claimed the HTLC with this preimage.
	Fulfill(PaymentPreimage),
/// The state machine for an HTLC our counterparty offered to us, from initial announcement
/// through irrevocable commitment to eventual removal.
// NOTE(review): some variants (e.g. a `Committed` state referenced by the docs above) are not
// visible in this excerpt — likely truncated.
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
/// An HTLC offered to us by our counterparty, tracked alongside its state machine.
// NOTE(review): additional fields (e.g. amount/cltv) and the closing brace are not visible in
// this excerpt — likely truncated.
struct InboundHTLCOutput {
	payment_hash: PaymentHash,
	// Where this HTLC currently sits in the inbound state machine.
	state: InboundHTLCState,
/// The state machine for an HTLC we offered to our counterparty, from initial announcement
/// through removal (success or failure).
// NOTE(review): a `Committed` variant referenced by the docs below is not visible in this
// excerpt — likely truncated.
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
/// The final outcome of an outbound HTLC once the counterparty removes it: either it was
/// fulfilled (success, with the preimage when known) or failed back to us.
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	// The failure reason to propagate backwards along the payment path.
	Failure(HTLCFailReason),
// Maps an optional failure reason to an outcome: `None` means success (preimage unknown),
// `Some(r)` means failure with reason `r`.
// NOTE(review): the `match` header and closing braces are not visible in this excerpt —
// likely truncated.
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		None => OutboundHTLCOutcome::Success(None),
		Some(r) => OutboundHTLCOutcome::Failure(r)
// Borrows the failure reason out of an outcome, if any.
// NOTE(review): idiomatic Rust prefers implementing `From` (which provides `Into` via the
// blanket impl); consider `impl<'a> From<&'a OutboundHTLCOutcome> for Option<&'a HTLCFailReason>`.
// Also, the `match` header and closing braces are not visible in this excerpt — likely truncated.
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		OutboundHTLCOutcome::Success(_) => None,
		OutboundHTLCOutcome::Failure(ref r) => Some(r)
/// An HTLC we offered to our counterparty, tracked alongside its state machine.
// NOTE(review): additional fields (e.g. amount/cltv/source) and the closing brace are not
// visible in this excerpt — likely truncated.
struct OutboundHTLCOutput {
	payment_hash: PaymentHash,
	// Where this HTLC currently sits in the outbound state machine.
	state: OutboundHTLCState,
	// The extra fee we're skimming off the top of this HTLC, if any.
	skimmed_fee_msat: Option<u64>,
/// An HTLC update (add/fulfill/fail) queued in the holding cell because we could not send it
/// immediately (e.g. while awaiting the counterparty's revoke_and_ack).
/// See AwaitingRemoteRevoke ChannelState for more info
// NOTE(review): several variant headers/fields and closing braces are not visible in this
// excerpt — likely truncated.
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		payment_hash: PaymentHash,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	// Preimage used to claim an inbound HTLC (fulfill path).
	payment_preimage: PaymentPreimage,
	// Encrypted onion failure to pass backwards (fail path).
	err_packet: msgs::OnionErrorPacket,
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
// NOTE(review): the enum declaration line and several discriminants (the `1 << 2`, `1 << 3`,
// and `1 << 6` base states referenced by the docs) are not visible in this excerpt — likely
// truncated.
/// Implies we have (or are prepared to) send our open_channel/accept_channel message
OurInitSent = 1 << 0,
/// Implies we have received their `open_channel`/`accept_channel` message
TheirInitSent = 1 << 1,
/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
/// upon receipt of `funding_created`, so simply skip this state.
/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
/// and our counterparty consider the funding transaction confirmed.
/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
TheirChannelReady = 1 << 4,
/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
OurChannelReady = 1 << 5,
/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
PeerDisconnected = 1 << 7,
/// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
/// sending any outbound messages until they've managed to finish.
MonitorUpdateInProgress = 1 << 8,
/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
/// messages as then we will be unable to determine which HTLCs they included in their
/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
/// Flag is set on `ChannelReady`.
AwaitingRemoteRevoke = 1 << 9,
/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
/// to respond with our own shutdown message when possible.
RemoteShutdownSent = 1 << 10,
/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
/// point, we may not add any new HTLCs to the channel.
LocalShutdownSent = 1 << 11,
/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
/// to drop us, but we store this anyway.
// NOTE(review): 4096 == 1 << 12; consider writing it as `1 << 12` for consistency with the
// other discriminants.
ShutdownComplete = 4096,
/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
/// broadcasting of the funding transaction is being held until all channels in the batch
/// have received funding_signed and have their monitors persisted.
WaitingForBatch = 1 << 13,
// Mask of the flags set once both sides have sent `shutdown`.
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
// Mask of flags which may be set on multiple base states simultaneously.
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
// Mask of all flag bits (as opposed to base-state values).
// NOTE(review): unlike its siblings this mask does not fold in `MULTI_STATE_FLAGS`; confirm
// whether a `MULTI_STATE_FLAGS |` term was dropped here.
const STATE_FLAGS: u32 =
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;

// Commitment numbers start at 2^48 - 1 and count down (48-bit space per BOLT #2's obscured
// commitment number scheme).
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

// Default cap on the number of pending HTLCs we will allow.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
329 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
330 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
331 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
332 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
// Weight added to a commitment transaction per non-dust HTLC output.
// NOTE(review): this constant is declared twice below (once private, once `pub`); upstream
// these carry `#[cfg(not(test))]`/`#[cfg(test)]` attributes which appear to have been lost —
// as written, the duplicate definitions conflict. Confirm and restore the cfg gates.
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

// The fixed value, in satoshis, of each anchor output on anchor-outputs commitment
// transactions.
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
// NOTE(review): the enum's variants (`Ignore`, `Warn`, `Close` per the Debug/Display impls
// below) and its closing brace are not visible in this excerpt — likely truncated.
pub(super) enum ChannelError {
// Debug formatting prefixes each variant's message with its severity ("Ignore"/"Warn"/"Close").
// NOTE(review): the `match self {` header and closing braces are not visible in this excerpt —
// likely truncated.
impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
		&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
		&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
// Display formatting emits only the inner message, without the severity prefix used by Debug.
// NOTE(review): the `match self {` header and closing braces are not visible in this excerpt —
// likely truncated.
impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		&ChannelError::Ignore(ref e) => write!(f, "{}", e),
		&ChannelError::Warn(ref e) => write!(f, "{}", e),
		&ChannelError::Close(ref e) => write!(f, "{}", e),
// Unwraps a secp256k1 `Result`, converting any error into an early
// `return Err(ChannelError::Close(..))` with the provided message.
// NOTE(review): the `match` header, `Ok` arm, and closing braces are not visible in this
// excerpt — likely truncated.
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		Err(_) => return Err(ChannelError::Close($err)),
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
// NOTE(review): the variant identifiers themselves are not visible in this excerpt (only their
// doc comments remain) — likely truncated.
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	/// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
// NOTE(review): the enum declaration line and the variant identifiers are not visible in this
// excerpt (only their doc comments remain) — likely truncated.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
/// An enum indicating whether the local or remote side offered a given HTLC.
// NOTE(review): the `HTLCInitiator` enum body and the stats struct's declaration line are not
// visible in this excerpt — likely truncated. The fields below appear to belong to the
// HTLC-stats struct described by the next doc comment.
/// An enum gathering stats on pending HTLCs, either inbound or outbound side.
pending_htlcs_value_msat: u64,
// Value exposed as dust on the counterparty's commitment transaction.
on_counterparty_tx_dust_exposure_msat: u64,
// Value exposed as dust on our own commitment transaction.
on_holder_tx_dust_exposure_msat: u64,
holding_cell_msat: u64,
on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
/// An enum gathering stats on commitment transaction, either local or remote.
// NOTE(review): despite the doc comment this is a struct, not an enum; its closing brace is
// not visible in this excerpt — likely truncated.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
/// Used when calculating whether we or the remote can afford an additional HTLC.
// NOTE(review): the struct's amount field, its closing brace, and the `impl` header for the
// `new` constructor below are not visible in this excerpt — likely truncated.
struct HTLCCandidate {
	// Which side (local or remote) would be adding this HTLC.
	origin: HTLCInitiator,
	// Constructor pairing a candidate amount with its initiating side.
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
// NOTE(review): the variant headers (`NewClaim {` / duplicate-claim variant) and closing braces
// are not visible in this excerpt — likely truncated.
enum UpdateFulfillFetch {
	// The monitor update which records the payment preimage.
	monitor_update: ChannelMonitorUpdate,
	// The value of the claimed HTLC, in msat.
	htlc_value_msat: u64,
	// The update_fulfill message to send, if one should be sent now.
	msg: Option<msgs::UpdateFulfillHTLC>,
/// The return type of get_update_fulfill_htlc_and_commit.
// NOTE(review): the variant headers and closing braces are not visible in this excerpt —
// likely truncated.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
	monitor_update: ChannelMonitorUpdate,
	/// The value of the HTLC which was claimed, in msat.
	htlc_value_msat: u64,
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
/// The return value of `monitor_updating_restored`: the messages and HTLC resolutions that were
/// held back while a `ChannelMonitor` update was pending and can now be released.
// NOTE(review): the struct's closing brace is not visible in this excerpt — likely truncated.
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	// Whether the RAA or the commitment update must be sent first.
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	// The funding transaction, if it should now be broadcast.
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
/// The return value of `channel_reestablish`: the messages we should (re-)send to our peer
/// after processing their `channel_reestablish`.
// NOTE(review): the struct's closing brace is not visible in this excerpt — likely truncated.
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	// Whether the RAA or the commitment update must be sent first.
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
/// The result of a shutdown that should be handled.
// NOTE(review): the struct's closing brace is not visible in this excerpt — likely truncated.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
// The constant is `pub` under test/fuzz builds so test harnesses can reference it.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
/// A `ChannelMonitorUpdate` that has been queued but not yet handed to the user for persistence.
// NOTE(review): the struct's closing brace and the macro invocation's closing `});` are not
// visible in this excerpt — likely truncated.
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
// Serialization: write the pending update as TLV field 0 (required).
impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
// NOTE(review): the funded variant (matched as `ChannelPhase::Funded` in the impl below) and
// the closing brace are not visible in this excerpt — likely truncated.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	// An outbound (V1) channel that has not yet been funded.
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	// An inbound (V1) channel that has not yet been funded.
	UnfundedInboundV1(InboundV1Channel<SP>),
// Accessors for the `ChannelContext` shared by every channel phase.
// NOTE(review): the impl's opening brace, the `match` headers, and closing braces are not
// visible in this excerpt — likely truncated.
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::Signer: ChannelSigner,
	// Borrows the context regardless of which phase the channel is in.
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		ChannelPhase::Funded(chan) => &chan.context,
		ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
		ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
	// Mutably borrows the context regardless of which phase the channel is in.
	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		ChannelPhase::Funded(ref mut chan) => &mut chan.context,
		ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
		ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
/// Contains all state common to unfunded inbound/outbound channels.
// NOTE(review): the struct's closing brace is not visible in this excerpt — likely truncated.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel reaches peer has yet to respond after reaching
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
// NOTE(review): the closing braces of the method and impl are not visible in this excerpt —
// likely truncated.
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		// Note this both increments the tick counter and reports expiry — it must be called
		// exactly once per timer tick.
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
679 /// Contains everything about the channel including state, and various flags.
680 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
681 	config: LegacyChannelConfig,
683 	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
684 	// constructed using it. The second element in the tuple corresponds to the number of ticks that
685 	// have elapsed since the update occurred.
686 	prev_config: Option<(ChannelConfig, usize)>,
688 	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
692 	/// The current channel ID.
693 	channel_id: ChannelId,
694 	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
695 	/// Will be `None` for channels created prior to 0.0.115.
696 	temporary_channel_id: Option<ChannelId>,
699 	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
700 	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
702 	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
703 	// Note that a number of our tests were written prior to the behavior here which retransmits
704 	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
706 	#[cfg(any(test, feature = "_test_utils"))]
707 	pub(crate) announcement_sigs_state: AnnouncementSigsState,
708 	#[cfg(not(any(test, feature = "_test_utils")))]
709 	announcement_sigs_state: AnnouncementSigsState,
711 	secp_ctx: Secp256k1<secp256k1::All>,
712 	channel_value_satoshis: u64,
714 	latest_monitor_update_id: u64,
716 	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
717 	shutdown_scriptpubkey: Option<ShutdownScript>,
718 	destination_script: Script,
720 	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
721 	// generation start at 0 and count up...this simplifies some parts of implementation at the
722 	// cost of others, but should really just be changed.
724 	cur_holder_commitment_transaction_number: u64,
725 	cur_counterparty_commitment_transaction_number: u64,
726 	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
727 	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
728 	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
729 	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
731 	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
732 	/// need to ensure we resend them in the order we originally generated them. Note that because
733 	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
734 	/// sufficient to simply set this to the opposite of any message we are generating as we
735 	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
736 	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
738 	resend_order: RAACommitmentOrder,
740 	monitor_pending_channel_ready: bool,
741 	monitor_pending_revoke_and_ack: bool,
742 	monitor_pending_commitment_signed: bool,
744 	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
745 	// responsible for some of the HTLCs here or not - we don't know whether the update in question
746 	// completed or not. We currently ignore these fields entirely when force-closing a channel,
747 	// but need to handle this somehow or we run the risk of losing HTLCs!
748 	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
749 	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
750 	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
752 	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
753 	/// but our signer (initially) refused to give us a signature, we should retry at some point in
754 	/// the future when the signer indicates it may have a signature for us.
756 	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
757 	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
758 	signer_pending_commitment_update: bool,
759 	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
760 	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
761 	/// outbound or inbound.
762 	signer_pending_funding: bool,
764 	// pending_update_fee is filled when sending and receiving update_fee.
766 	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
767 	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
768 	// generating new commitment transactions with exactly the same criteria as inbound/outbound
769 	// HTLCs with similar state.
770 	pending_update_fee: Option<(u32, FeeUpdateState)>,
771 	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
772 	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
773 	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
774 	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
775 	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
776 	holding_cell_update_fee: Option<u32>,
777 	next_holder_htlc_id: u64,
778 	next_counterparty_htlc_id: u64,
781 	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
782 	/// when the channel is updated in ways which may impact the `channel_update` message or when a
783 	/// new block is received, ensuring it's always at least moderately close to the current real
785 	update_time_counter: u32,
787 	#[cfg(debug_assertions)]
788 	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
789 	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
790 	#[cfg(debug_assertions)]
791 	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
792 	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
794 	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
795 	target_closing_feerate_sats_per_kw: Option<u32>,
797 	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
798 	/// update, we need to delay processing it until later. We do that here by simply storing the
799 	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
800 	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
802 	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
803 	/// transaction. These are set once we reach `closing_negotiation_ready`.
	// NOTE(review): this field appears twice (once `pub(crate)`, once private); in this copy the
	// `#[cfg(test)]` / `#[cfg(not(test))]` attributes that would gate the pair appear to have been
	// lost — confirm against upstream. The same pattern repeats for several pairs below.
805 	pub(crate) closing_fee_limits: Option<(u64, u64)>,
807 	closing_fee_limits: Option<(u64, u64)>,
809 	/// The hash of the block in which the funding transaction was included.
810 	funding_tx_confirmed_in: Option<BlockHash>,
811 	funding_tx_confirmation_height: u32,
812 	short_channel_id: Option<u64>,
813 	/// Either the height at which this channel was created or the height at which it was last
814 	/// serialized if it was serialized by versions prior to 0.0.103.
815 	/// We use this to close if funding is never broadcasted.
816 	channel_creation_height: u32,
818 	counterparty_dust_limit_satoshis: u64,
821 	pub(super) holder_dust_limit_satoshis: u64,
823 	holder_dust_limit_satoshis: u64,
826 	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
828 	counterparty_max_htlc_value_in_flight_msat: u64,
831 	pub(super) holder_max_htlc_value_in_flight_msat: u64,
833 	holder_max_htlc_value_in_flight_msat: u64,
835 	/// minimum channel reserve for self to maintain - set by them.
836 	counterparty_selected_channel_reserve_satoshis: Option<u64>,
839 	pub(super) holder_selected_channel_reserve_satoshis: u64,
841 	holder_selected_channel_reserve_satoshis: u64,
843 	counterparty_htlc_minimum_msat: u64,
844 	holder_htlc_minimum_msat: u64,
846 	pub counterparty_max_accepted_htlcs: u16,
848 	counterparty_max_accepted_htlcs: u16,
849 	holder_max_accepted_htlcs: u16,
850 	minimum_depth: Option<u32>,
852 	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
854 	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
855 	funding_transaction: Option<Transaction>,
856 	is_batch_funding: Option<()>,
858 	counterparty_cur_commitment_point: Option<PublicKey>,
859 	counterparty_prev_commitment_point: Option<PublicKey>,
860 	counterparty_node_id: PublicKey,
862 	counterparty_shutdown_scriptpubkey: Option<Script>,
864 	commitment_secrets: CounterpartyCommitmentSecrets,
866 	channel_update_status: ChannelUpdateStatus,
867 	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
868 	/// not complete within a single timer tick (one minute), we should force-close the channel.
869 	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
871 	/// Note that this field is reset to false on deserialization to give us a chance to connect to
872 	/// our peer and start the closing_signed negotiation fresh.
873 	closing_signed_in_flight: bool,
875 	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
876 	/// This can be used to rebroadcast the channel_announcement message later.
877 	announcement_sigs: Option<(Signature, Signature)>,
879 	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
880 	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
881 	// be, by comparing the cached values to the fee of the transaction generated by
882 	// `build_commitment_transaction`.
883 	#[cfg(any(test, fuzzing))]
884 	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
885 	#[cfg(any(test, fuzzing))]
886 	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
888 	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
889 	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
890 	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
891 	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
892 	/// message until we receive a channel_reestablish.
894 	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
895 	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
897 	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
898 	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
899 	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
900 	/// unblock the state machine.
902 	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
903 	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
904 	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
906 	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
907 	/// [`msgs::RevokeAndACK`] message from the counterparty.
908 	sent_message_awaiting_response: Option<usize>,
910 	#[cfg(any(test, fuzzing))]
911 	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
912 	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
913 	// disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
914 	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
915 	// is fine, but as a sanity check in our failure to generate the second claim, we check here
916 	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
917 	historical_inbound_htlc_fulfills: HashSet<u64>,
919 	/// This channel's type, as negotiated during channel open
920 	channel_type: ChannelTypeFeatures,
922 	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
923 	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
924 	// the channel's funding UTXO.
926 	// We also use this when sending our peer a channel_update that isn't to be broadcasted
927 	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
928 	// associated channel mapping.
930 	// We only bother storing the most recent SCID alias at any time, though our counterparty has
931 	// to store all of them.
932 	latest_inbound_scid_alias: Option<u64>,
934 	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
935 	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
936 	// don't currently support node id aliases and eventually privacy should be provided with
937 	// blinded paths instead of simple scid+node_id aliases.
938 	outbound_scid_alias: u64,
940 	// We track whether we already emitted a `ChannelPending` event.
941 	channel_pending_event_emitted: bool,
943 	// We track whether we already emitted a `ChannelReady` event.
944 	channel_ready_event_emitted: bool,
946 	/// The unique identifier used to re-derive the private key material for the channel through
947 	/// [`SignerProvider::derive_channel_signer`].
948 	channel_keys_id: [u8; 32],
950 	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
951 	/// store it here and only release it to the `ChannelManager` once it asks for it.
952 	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
955 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
956 	/// Allowed in any state (including after shutdown)
957 	pub fn get_update_time_counter(&self) -> u32 {
958 		self.update_time_counter
	/// Returns the id of the latest `ChannelMonitorUpdate` generated for this channel.
961 	pub fn get_latest_monitor_update_id(&self) -> u64 {
962 		self.latest_monitor_update_id
	/// Returns true if this channel was configured to be publicly announced to the network.
965 	pub fn should_announce(&self) -> bool {
966 		self.config.announced_channel
	/// Returns true if we are the channel initiator (i.e. the funding holder).
969 	pub fn is_outbound(&self) -> bool {
970 		self.channel_transaction_parameters.is_outbound_from_holder
973 	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
974 	/// Allowed in any state (including after shutdown)
975 	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
976 		self.config.options.forwarding_fee_base_msat
979 	/// Returns true if we've ever received a message from the remote end for this Channel
980 	pub fn have_received_message(&self) -> bool {
		// Strip the non-state flag bits before comparing against the state ordering.
981 		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
984 	/// Returns true if this channel is fully established and not known to be closing.
985 	/// Allowed in any state (including after shutdown)
986 	pub fn is_usable(&self) -> bool {
987 		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
988 		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
991 	/// shutdown state returns the state of the channel in its various stages of shutdown
992 	pub fn shutdown_state(&self) -> ChannelShutdownState {
		// Checks are ordered from most- to least-terminal shutdown stage.
993 		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
994 			return ChannelShutdownState::ShutdownComplete;
		// We sent shutdown but have not yet seen the counterparty's shutdown.
996 		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
997 			return ChannelShutdownState::ShutdownInitiated;
999 		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
1000 			return ChannelShutdownState::ResolvingHTLCs;
1002 		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1003 			return ChannelShutdownState::NegotiatingClosingFee;
1005 		return ChannelShutdownState::NotShuttingDown;
	/// Returns true if both sides have exchanged `shutdown` and nothing else is pending
	/// (no in-flight HTLCs, fee update, unrevoked commitment, disconnection, or monitor
	/// update), i.e. `closing_signed` fee negotiation may begin.
1008 	fn closing_negotiation_ready(&self) -> bool {
1009 		self.pending_inbound_htlcs.is_empty() &&
1010 			self.pending_outbound_htlcs.is_empty() &&
1011 			self.pending_update_fee.is_none() &&
			// Both shutdown bits must be set, and none of the blocking flags may be.
1012 			self.channel_state &
1013 			(BOTH_SIDES_SHUTDOWN_MASK |
1014 				ChannelState::AwaitingRemoteRevoke as u32 |
1015 				ChannelState::PeerDisconnected as u32 |
1016 				ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1019 	/// Returns true if this channel is currently available for use. This is a superset of
1020 	/// is_usable() and considers things like the channel being temporarily disabled.
1021 	/// Allowed in any state (including after shutdown)
1022 	pub fn is_live(&self) -> bool {
1023 		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1026 	// Public utilities:
	/// Returns the channel's current id (the temporary id before funding, the final id after).
1028 	pub fn channel_id(&self) -> ChannelId {
1032 	// Return the `temporary_channel_id` used during channel establishment.
1034 	// Will return `None` for channels created prior to LDK version 0.0.115.
1035 	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1036 		self.temporary_channel_id
	/// Returns the number of funding confirmations we require before considering the channel open.
1039 	pub fn minimum_depth(&self) -> Option<u32> {
1043 	/// Gets the "user_id" value passed into the construction of this channel. It has no special
1044 	/// meaning and exists only to allow users to have a persistent identifier of a channel.
1045 	pub fn get_user_id(&self) -> u128 {
1049 	/// Gets the channel's type
1050 	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1054 	/// Gets the channel's `short_channel_id`.
1056 	/// Will return `None` if the channel hasn't been confirmed yet.
1057 	pub fn get_short_channel_id(&self) -> Option<u64> {
1058 		self.short_channel_id
1061 	/// Allowed in any state (including after shutdown)
1062 	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1063 		self.latest_inbound_scid_alias
1066 	/// Allowed in any state (including after shutdown)
1067 	pub fn outbound_scid_alias(&self) -> u64 {
1068 		self.outbound_scid_alias
1071 	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1072 	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1073 	/// or prior to any channel actions during `Channel` initialization.
1074 	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		// Setting an alias twice would break the SCID -> channel mapping; only a zero
		// (i.e. never-set) alias may be overwritten.
1075 		debug_assert_eq!(self.outbound_scid_alias, 0);
1076 		self.outbound_scid_alias = outbound_scid_alias;
1079 	/// Returns the funding_txo we either got from our peer, or were given by
1080 	/// get_funding_created.
1081 	pub fn get_funding_txo(&self) -> Option<OutPoint> {
1082 		self.channel_transaction_parameters.funding_outpoint
1085 	/// Returns the block hash in which our funding transaction was confirmed.
1086 	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1087 		self.funding_tx_confirmed_in
1090 	/// Returns the current number of confirmations on the funding transaction.
1091 	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1092 		if self.funding_tx_confirmation_height == 0 {
1093 			// We either haven't seen any confirmation yet, or observed a reorg.
		// The confirmation height itself counts as one confirmation, hence the `+ 1`;
		// `checked_sub` guards against `height` being below the confirmation height.
1097 		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	/// Returns the `to_self_delay` (in blocks) we imposed on the counterparty's outputs.
1100 	fn get_holder_selected_contest_delay(&self) -> u16 {
1101 		self.channel_transaction_parameters.holder_selected_contest_delay
	/// Returns our set of channel basepoints/pubkeys.
1104 	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1105 		&self.channel_transaction_parameters.holder_pubkeys
	/// Returns the `to_self_delay` the counterparty imposed on our outputs, if negotiated yet.
1108 	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1109 		self.channel_transaction_parameters.counterparty_parameters
1110 			.as_ref().map(|params| params.selected_contest_delay)
	/// Returns the counterparty's channel basepoints/pubkeys.
	// NOTE(review): unwrap() here presumes counterparty_parameters is always populated by the
	// time this is called — confirm callers only invoke this post-handshake.
1113 	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1114 		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1117 	/// Allowed in any state (including after shutdown)
1118 	pub fn get_counterparty_node_id(&self) -> PublicKey {
1119 		self.counterparty_node_id
1122 	/// Allowed in any state (including after shutdown)
1123 	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1124 		self.holder_htlc_minimum_msat
1127 	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1128 	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1129 		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1132 	/// Allowed in any state (including after shutdown)
1133 	pub fn get_announced_htlc_max_msat(&self) -> u64 {
1135 			// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1136 			// to use full capacity. This is an effort to reduce routing failures, because in many cases
1137 			// channel might have been used to route very small values (either by honest users or as DoS).
1138 			self.channel_value_satoshis * 1000 * 9 / 10,
1140 			self.counterparty_max_htlc_value_in_flight_msat
1144 	/// Allowed in any state (including after shutdown)
1145 	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1146 		self.counterparty_htlc_minimum_msat
1149 	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1150 	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1151 		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	/// Computes the effective HTLC maximum for one side: the channel capacity net of both
	/// reserves, capped by that side's negotiated max-in-flight. Returns `None` until the
	/// counterparty's reserve has been negotiated.
1154 	fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1155 		self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1156 			let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1158 				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1159 				party_max_htlc_value_in_flight_msat
	/// Returns the total channel value, in satoshis.
1164 	pub fn get_value_satoshis(&self) -> u64 {
1165 		self.channel_value_satoshis
	/// Returns the proportional forwarding fee we charge, in millionths of the forwarded amount.
1168 	pub fn get_fee_proportional_millionths(&self) -> u32 {
1169 		self.config.options.forwarding_fee_proportional_millionths
	/// Returns the CLTV delta we require for forwarded HTLCs, floored at `MIN_CLTV_EXPIRY_DELTA`.
1172 	pub fn get_cltv_expiry_delta(&self) -> u16 {
1173 		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	/// Returns the maximum value, in msat, we are willing to have exposed in dust HTLCs
	/// (either a fixed limit, or a multiple of the current on-chain-sweep feerate).
1176 	pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1177 		fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1178 	where F::Target: FeeEstimator
1180 		match self.config.options.max_dust_htlc_exposure {
1181 			MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1182 				let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1183 					ConfirmationTarget::OnChainSweep);
1184 				feerate_per_kw as u64 * multiplier
1186 			MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1190 	/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1191 	pub fn prev_config(&self) -> Option<ChannelConfig> {
1192 		self.prev_config.map(|prev_config| prev_config.0)
1195 	// Checks whether we should emit a `ChannelPending` event.
1196 	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1197 		self.is_funding_broadcast() && !self.channel_pending_event_emitted
1200 	// Returns whether we already emitted a `ChannelPending` event.
1201 	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1202 		self.channel_pending_event_emitted
1205 	// Remembers that we already emitted a `ChannelPending` event.
1206 	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1207 		self.channel_pending_event_emitted = true;
1210 	// Checks whether we should emit a `ChannelReady` event.
1211 	pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1212 		self.is_usable() && !self.channel_ready_event_emitted
1215 	// Remembers that we already emitted a `ChannelReady` event.
1216 	pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1217 		self.channel_ready_event_emitted = true;
1220 	/// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1221 	/// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1222 	/// no longer be considered when forwarding HTLCs.
1223 	pub fn maybe_expire_prev_config(&mut self) {
1224 		if self.prev_config.is_none() {
1227 		let prev_config = self.prev_config.as_mut().unwrap();
1229 		if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1230 			self.prev_config = None;
1234 	/// Returns the current [`ChannelConfig`] applied to the channel.
1235 	pub fn config(&self) -> ChannelConfig {
1239 	/// Updates the channel's config. A bool is returned indicating whether the config update
1240 	/// applied resulted in a new ChannelUpdate message.
1241 	pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
		// Only fields that appear in channel_update require re-announcement; other config
		// fields are applied silently below.
1242 		let did_channel_update =
1243 			self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1244 			self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1245 			self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1246 		if did_channel_update {
			// Keep the old config (with a fresh tick counter) so in-flight HTLCs built
			// against it can still be forwarded until it expires.
1247 			self.prev_config = Some((self.config.options, 0));
1248 			// Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1249 			// policy change to propagate throughout the network.
1250 			self.update_time_counter += 1;
1252 		self.config.options = *config;
1256 	/// Returns true if funding_signed was sent/received and the
1257 	/// funding transaction has been broadcast if necessary.
1258 	pub fn is_funding_broadcast(&self) -> bool {
1259 		self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1260 			self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1263 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1264 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1265 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1266 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1267 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1269 /// @local is used only to convert relevant internal structures which refer to remote vs local
1270 /// to decide value of outputs and direction of HTLCs.
1271 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1272 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1273 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1274 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1275 /// which peer generated this transaction and "to whom" this transaction flows.
1277 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1278 where L::Target: Logger
1280 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1281 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1282 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1284 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1285 let mut remote_htlc_total_msat = 0;
1286 let mut local_htlc_total_msat = 0;
1287 let mut value_to_self_msat_offset = 0;
1289 let mut feerate_per_kw = self.feerate_per_kw;
1290 if let Some((feerate, update_state)) = self.pending_update_fee {
1291 if match update_state {
1292 // Note that these match the inclusion criteria when scanning
1293 // pending_inbound_htlcs below.
1294 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1295 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1296 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1298 feerate_per_kw = feerate;
1302 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1303 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1304 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1306 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1308 macro_rules! get_htlc_in_commitment {
1309 ($htlc: expr, $offered: expr) => {
1310 HTLCOutputInCommitment {
1312 amount_msat: $htlc.amount_msat,
1313 cltv_expiry: $htlc.cltv_expiry,
1314 payment_hash: $htlc.payment_hash,
1315 transaction_output_index: None
1320 macro_rules! add_htlc_output {
1321 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1322 if $outbound == local { // "offered HTLC output"
1323 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1324 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1327 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1329 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1330 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1331 included_non_dust_htlcs.push((htlc_in_tx, $source));
1333 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1334 included_dust_htlcs.push((htlc_in_tx, $source));
1337 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1338 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1341 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1343 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1344 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1345 included_non_dust_htlcs.push((htlc_in_tx, $source));
1347 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1348 included_dust_htlcs.push((htlc_in_tx, $source));
1354 for ref htlc in self.pending_inbound_htlcs.iter() {
1355 let (include, state_name) = match htlc.state {
1356 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1357 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1358 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1359 InboundHTLCState::Committed => (true, "Committed"),
1360 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1364 add_htlc_output!(htlc, false, None, state_name);
1365 remote_htlc_total_msat += htlc.amount_msat;
1367 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1369 &InboundHTLCState::LocalRemoved(ref reason) => {
1370 if generated_by_local {
1371 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1372 value_to_self_msat_offset += htlc.amount_msat as i64;
1381 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1383 for ref htlc in self.pending_outbound_htlcs.iter() {
1384 let (include, state_name) = match htlc.state {
1385 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1386 OutboundHTLCState::Committed => (true, "Committed"),
1387 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1388 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1389 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1392 let preimage_opt = match htlc.state {
1393 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1394 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1395 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1399 if let Some(preimage) = preimage_opt {
1400 preimages.push(preimage);
1404 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1405 local_htlc_total_msat += htlc.amount_msat;
1407 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1409 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1410 value_to_self_msat_offset -= htlc.amount_msat as i64;
1412 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1413 if !generated_by_local {
1414 value_to_self_msat_offset -= htlc.amount_msat as i64;
1422 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1423 assert!(value_to_self_msat >= 0);
1424 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1425 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1426 // "violate" their reserve value by counting those against it. Thus, we have to convert
1427 // everything to i64 before subtracting as otherwise we can overflow.
1428 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1429 assert!(value_to_remote_msat >= 0);
1431 #[cfg(debug_assertions)]
1433 // Make sure that the to_self/to_remote is always either past the appropriate
1434 // channel_reserve *or* it is making progress towards it.
1435 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1436 self.holder_max_commitment_tx_output.lock().unwrap()
1438 self.counterparty_max_commitment_tx_output.lock().unwrap()
1440 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1441 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1442 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1443 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1446 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1447 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1448 let (value_to_self, value_to_remote) = if self.is_outbound() {
1449 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1451 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1454 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1455 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1456 let (funding_pubkey_a, funding_pubkey_b) = if local {
1457 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1459 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1462 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1463 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1468 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1469 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1474 let num_nondust_htlcs = included_non_dust_htlcs.len();
1476 let channel_parameters =
1477 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1478 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1479 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1486 &mut included_non_dust_htlcs,
1489 let mut htlcs_included = included_non_dust_htlcs;
1490 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1491 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1492 htlcs_included.append(&mut included_dust_htlcs);
1494 // For the stats, trimmed-to-0 the value in msats accordingly
1495 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1496 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1504 local_balance_msat: value_to_self_msat as u64,
1505 remote_balance_msat: value_to_remote_msat as u64,
1511 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1512 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1513 /// our counterparty!)
1514 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1515 /// TODO Some magic rust shit to compile-time check this?
1516 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
// Derive the per-commitment point for this specific commitment number from our signer;
// all per-commitment keys below are rotated from this point.
1517 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
// Delayed-payment and HTLC basepoints are ours (we are the broadcaster of this tx)...
1518 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1519 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1520 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// ...while the revocation basepoint comes from the counterparty, matching the doc above:
// this is a transaction whose broadcastership we can later revoke.
1522 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1526 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1527 /// will sign and send to our counterparty.
1528 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
// NOTE(review): the sentence above mentions an Err return, but the signature is infallible
// (returns `TxCreationKeys` directly) — stale doc, consider removing.
1529 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1530 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1531 //may see payments to it!
// Mirror image of build_holder_transaction_keys: the revocation/HTLC basepoints we pass
// as "ours" here are the holder's, since the counterparty is the broadcaster of this tx.
1532 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1533 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1534 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// Panics if `counterparty_cur_commitment_point` is `None` (i.e. before we have received
// the counterparty's current per-commitment point).
1536 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1539 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1540 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1541 /// Panics if called before accept_channel/InboundV1Channel::new
1542 pub fn get_funding_redeemscript(&self) -> Script {
// 2-of-2 multisig over the two funding pubkeys; argument order (holder first) is handled
// inside make_funding_redeemscript.
1543 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1546 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1547 &self.get_counterparty_pubkeys().funding_pubkey
// Returns the channel's current commitment feerate, in satoshis per 1000 weight units.
// NOTE(review): the function body is not visible in this chunk; presumably it returns
// `self.feerate_per_kw` — confirm against the full file.
1550 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
// Returns a pessimistic feerate to use when calculating dust exposure: the current feerate
// (including any pending or caller-proposed update) bumped by a buffer of +2530 sat/kWU or
// +25%, whichever is higher.
1554 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1555 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1556 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1557 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1558 // more dust balance if the feerate increases when we have several HTLCs pending
1559 // which are near the dust limit.
1560 let mut feerate_per_kw = self.feerate_per_kw;
1561 // If there's a pending update fee, use it to ensure we aren't under-estimating
1562 // potential feerate updates coming soon.
1563 if let Some((feerate, _)) = self.pending_update_fee {
1564 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// Also account for a feerate update the caller is about to send but hasn't queued yet.
1566 if let Some(feerate) = outbound_feerate_update {
1567 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// max(current + 2530-equivalent floor, current * 1.25): `* 1250 / 1000` is the +25% bump,
// and 2530 is the absolute floor per the comment above.
1569 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1572 /// Get forwarding information for the counterparty.
1573 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1574 self.counterparty_forwarding_info.clone()
1577 /// Returns a HTLCStats about inbound pending htlcs
// Only the inbound-HTLC-related fields are filled here; holding-cell fields stay zero
// (holding-cell adds are always outbound, see get_outbound_pending_htlc_stats).
1578 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1580 let mut stats = HTLCStats {
1581 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1582 pending_htlcs_value_msat: 0,
1583 on_counterparty_tx_dust_exposure_msat: 0,
1584 on_holder_tx_dust_exposure_msat: 0,
1585 holding_cell_msat: 0,
1586 on_holder_tx_holding_cell_htlcs_count: 0,
// Compute the second-stage HTLC tx fee to add on top of each side's dust limit. With
// zero-fee-anchor channels the branch body (not shown here) presumably adds no fee —
// TODO(review): confirm the anchors arm against the full file.
1589 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
// Use the buffered (pessimistic) feerate so dust exposure survives a feerate bump.
1592 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1593 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1594 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Inbound HTLCs are timeout-claimed on the counterparty's tx and success-claimed on ours.
1596 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1597 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1598 for ref htlc in context.pending_inbound_htlcs.iter() {
1599 stats.pending_htlcs_value_msat += htlc.amount_msat;
// An HTLC below the (fee-adjusted) dust limit has no on-chain output and is pure exposure.
1600 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1601 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1603 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1604 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1610 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1611 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1613 let mut stats = HTLCStats {
1614 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1615 pending_htlcs_value_msat: 0,
1616 on_counterparty_tx_dust_exposure_msat: 0,
1617 on_holder_tx_dust_exposure_msat: 0,
1618 holding_cell_msat: 0,
1619 on_holder_tx_holding_cell_htlcs_count: 0,
// Same fee-adjusted dust limits as the inbound variant, using the buffered feerate.
1622 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1625 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1626 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1627 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Outbound HTLCs are success-claimed on the counterparty's tx and timeout-claimed on ours
// (the mirror of the inbound case).
1629 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1630 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1631 for ref htlc in context.pending_outbound_htlcs.iter() {
1632 stats.pending_htlcs_value_msat += htlc.amount_msat;
1633 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1634 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1636 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1637 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// Holding-cell adds are not yet in pending_outbound_htlcs but will be sent, so count them
// into every stat (including the holding-cell-specific ones).
1641 for update in context.holding_cell_htlc_updates.iter() {
1642 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1643 stats.pending_htlcs += 1;
1644 stats.pending_htlcs_value_msat += amount_msat;
1645 stats.holding_cell_msat += amount_msat;
1646 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1647 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1649 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1650 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1652 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1659 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1660 /// Doesn't bother handling the
1661 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1662 /// corner case properly.
1663 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1664 -> AvailableBalances
1665 where F::Target: FeeEstimator
1667 let context = &self;
1668 // Note that we have to handle overflow due to the above case.
1669 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1670 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
// Claimed-balance view: our settled balance, plus inbound HTLCs whose preimage we have
// already released (LocalRemoved/Fulfill), minus everything we currently have in flight
// outbound (including holding-cell adds).
1672 let mut balance_msat = context.value_to_self_msat;
1673 for ref htlc in context.pending_inbound_htlcs.iter() {
1674 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1675 balance_msat += htlc.amount_msat;
1678 balance_msat -= outbound_stats.pending_htlcs_value_msat;
// Raw spendable capacity: settled balance minus in-flight outbound and the reserve the
// counterparty requires of us (0 if they haven't told us one yet).
1680 let outbound_capacity_msat = context.value_to_self_msat
1681 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1683 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1685 let mut available_capacity_msat = outbound_capacity_msat;
// With anchors, both anchor outputs come out of the fee-payer's balance.
1687 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1688 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1692 if context.is_outbound() {
1693 // We should mind channel commit tx fee when computing how much of the available capacity
1694 // can be used in the next htlc. Mirrors the logic in send_htlc.
1696 // The fee depends on whether the amount we will be sending is above dust or not,
1697 // and the answer will in turn change the amount itself — making it a circular
1699 // This complicates the computation around dust-values, up to the one-htlc-value.
1700 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1701 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1702 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
// Probe the commit-tx fee both with a just-above-dust and a just-below-dust candidate
// HTLC; the difference is exactly one non-dust HTLC's worth of fee.
1705 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1706 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1707 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1708 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
// Non-anchor channels reserve a fee-spike buffer on top (anchors can bump fees later).
1709 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1710 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1711 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1714 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1715 // value ends up being below dust, we have this fee available again. In that case,
1716 // match the value to right-below-dust.
1717 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1718 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1719 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1720 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1721 debug_assert!(one_htlc_difference_msat != 0);
1722 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1723 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1724 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1726 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1729 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1730 // sending a new HTLC won't reduce their balance below our reserve threshold.
1731 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1732 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1733 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1736 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1737 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1739 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1740 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1741 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1743 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1744 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1745 // we've selected for them, we can only send dust HTLCs.
1746 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1750 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1752 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1753 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1754 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1755 // send above the dust limit (as the router can always overpay to meet the dust limit).
1756 let mut remaining_msat_below_dust_exposure_limit = None;
1757 let mut dust_exposure_dust_limit_msat = 0;
1758 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1760 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1761 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1763 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1764 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1765 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Would one more maximally-dusty HTLC push us past the exposure cap on either tx?
1767 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1768 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
1769 remaining_msat_below_dust_exposure_limit =
1770 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1771 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1774 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1775 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
1776 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1777 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1778 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1779 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
// If dust exposure binds, either cap capacity at the remaining dust budget or force the
// next HTLC to be above-dust via the minimum.
1782 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1783 if available_capacity_msat < dust_exposure_dust_limit_msat {
1784 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1786 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Finally cap by the counterparty's max-in-flight and max-accepted-HTLC-count limits.
1790 available_capacity_msat = cmp::min(available_capacity_msat,
1791 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1793 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1794 available_capacity_msat = 0;
1798 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1799 - context.value_to_self_msat as i64
1800 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1801 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1803 outbound_capacity_msat,
1804 next_outbound_htlc_limit_msat: available_capacity_msat,
1805 next_outbound_htlc_minimum_msat,
1810 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1811 let context = &self;
1812 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1815 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1816 /// number of pending HTLCs that are on track to be in our next commitment tx.
1818 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1819 /// `fee_spike_buffer_htlc` is `Some`.
1821 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1822 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1824 /// Dust HTLCs are excluded.
1825 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1826 let context = &self;
// Only the funder pays commitment fees, so this is only meaningful on outbound channels.
1827 assert!(context.is_outbound());
// Fee-adjusted dust limits on *our* commitment tx (success = inbound, timeout = outbound).
1829 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1832 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1833 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1835 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1836 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
// Count the candidate HTLC (if non-dust) and the optional fee-spike buffer HTLC.
1838 let mut addl_htlcs = 0;
1839 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1841 HTLCInitiator::LocalOffered => {
1842 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1846 HTLCInitiator::RemoteOffered => {
1847 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
// Count all currently-pending non-dust HTLCs that would appear in our next commitment tx.
1853 let mut included_htlcs = 0;
1854 for ref htlc in context.pending_inbound_htlcs.iter() {
1855 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1858 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1859 // transaction including this HTLC if it times out before they RAA.
1860 included_htlcs += 1;
1863 for ref htlc in context.pending_outbound_htlcs.iter() {
1864 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1868 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1869 OutboundHTLCState::Committed => included_htlcs += 1,
1870 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1871 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1872 // transaction won't be generated until they send us their next RAA, which will mean
1873 // dropping any HTLCs in this state.
// Holding-cell adds will be on our next commitment tx too, if non-dust.
1878 for htlc in context.holding_cell_htlc_updates.iter() {
1880 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1881 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1886 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1887 // ack we're guaranteed to never include them in commitment txs anymore.
1891 let num_htlcs = included_htlcs + addl_htlcs;
1892 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only bookkeeping: cache what this prediction assumed so a later real
// commitment build can cross-check it.
1893 #[cfg(any(test, fuzzing))]
1896 if fee_spike_buffer_htlc.is_some() {
1897 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1899 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1900 + context.holding_cell_htlc_updates.len();
1901 let commitment_tx_info = CommitmentTxInfoCached {
1903 total_pending_htlcs,
1904 next_holder_htlc_id: match htlc.origin {
1905 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1906 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1908 next_counterparty_htlc_id: match htlc.origin {
1909 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1910 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1912 feerate: context.feerate_per_kw,
1914 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1919 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1920 /// pending HTLCs that are on track to be in their next commitment tx
1922 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1923 /// `fee_spike_buffer_htlc` is `Some`.
1925 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1926 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1928 /// Dust HTLCs are excluded.
1929 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1930 let context = &self;
// Mirror of next_local_commit_tx_fee_msat: only meaningful when the counterparty funds.
1931 assert!(!context.is_outbound());
// Fee-adjusted dust limits on *their* commitment tx, hence their dust limit, and with
// success/timeout roles swapped relative to the local variant.
1933 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1936 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1937 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1939 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1940 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1942 let mut addl_htlcs = 0;
1943 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1945 HTLCInitiator::LocalOffered => {
1946 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1950 HTLCInitiator::RemoteOffered => {
1951 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1957 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
1958 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
1959 // committed outbound HTLCs, see below.
1960 let mut included_htlcs = 0;
1961 for ref htlc in context.pending_inbound_htlcs.iter() {
// NOTE(review): this dust check uses `<=` where next_local_commit_tx_fee_msat uses `<`,
// so an HTLC exactly at the limit is treated as dust here but non-dust there — confirm
// whether this boundary asymmetry is intentional.
1962 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
1965 included_htlcs += 1;
1968 for ref htlc in context.pending_outbound_htlcs.iter() {
1969 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
1972 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
1973 // i.e. if they've responded to us with an RAA after announcement.
1975 OutboundHTLCState::Committed => included_htlcs += 1,
1976 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1977 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
1982 let num_htlcs = included_htlcs + addl_htlcs;
1983 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only bookkeeping mirroring the local variant's cached prediction.
1984 #[cfg(any(test, fuzzing))]
1987 if fee_spike_buffer_htlc.is_some() {
1988 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1990 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
1991 let commitment_tx_info = CommitmentTxInfoCached {
1993 total_pending_htlcs,
1994 next_holder_htlc_id: match htlc.origin {
1995 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1996 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1998 next_counterparty_htlc_id: match htlc.origin {
1999 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2000 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2002 feerate: context.feerate_per_kw,
2004 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
// Helper: evaluates `f` only while the funding transaction has not yet been broadcast —
// i.e. the channel is still in FundingCreated, or is waiting on the rest of its batch.
// Presumably returns None otherwise (the else-branch is not visible here — confirm).
2009 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2010 where F: Fn() -> Option<O> {
2011 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2012 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2019 /// Returns the transaction if there is a pending funding transaction that is yet to be
2021 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
// Clones the stored funding transaction; None once the funding tx is broadcastable.
2022 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2025 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2027 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
// Reads the txid from the channel parameters' funding outpoint rather than the full tx,
// so this can succeed even if the full transaction is not stored.
2028 self.if_unbroadcasted_funding(||
2029 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2033 /// Returns whether the channel is funded in a batch.
2034 pub fn is_batch_funding(&self) -> bool {
// `is_batch_funding` is an Option used purely as a presence flag here.
2035 self.is_batch_funding.is_some()
2038 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2040 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2041 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2044 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2045 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2046 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2047 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2048 /// immediately (others we will have to allow to time out).
2049 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2050 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2051 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2052 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2053 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
// Force-closing twice would be a logic error.
2054 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2056 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2057 // return them to fail the payment.
2058 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2059 let counterparty_node_id = self.get_counterparty_node_id();
// Drain (not iterate) — the holding cell is permanently emptied as part of shutdown.
2060 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2062 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2063 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2068 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2069 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2070 // returning a channel monitor update here would imply a channel monitor update before
2071 // we even registered the channel monitor to begin with, which is invalid.
2072 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2073 // funding transaction, don't return a funding txo (which prevents providing the
2074 // monitor update to the user, even if we return one).
2075 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2076 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
// CLOSED_CHANNEL_UPDATE_ID marks this as the terminal monitor update for the channel.
2077 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2078 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2079 update_id: self.latest_monitor_update_id,
2080 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// Capture the batch funding txid *before* flipping state, since it depends on channel_state.
2084 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2086 self.channel_state = ChannelState::ShutdownComplete as u32;
2087 self.update_time_counter += 1;
2090 dropped_outbound_htlcs,
2091 unbroadcasted_batch_funding_txid,
2096 // Internal utility functions for channels
2098 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2099 /// `channel_value_satoshis` in msat, set through
2100 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2102 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2104 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2105 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2106 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2108 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2111 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2113 channel_value_satoshis * 10 * configured_percent
2116 /// Returns a minimum channel reserve value the remote needs to maintain,
2117 /// required by us according to the configured or default
2118 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2120 /// Guaranteed to return a value no larger than channel_value_satoshis
2122 /// This is used both for outbound and inbound channels and has lower bound
2123 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2124 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2125 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2126 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2129 /// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2131 /// from storage. Hence, we use this function to not persist default values of
2132 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value, floored at 1000 sats and capped at the full channel value.
	(channel_value_satoshis / 100).max(1000).min(channel_value_satoshis)
}
2138 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2139 // Note that num_htlcs should not include dust HTLCs.
2141 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2142 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2145 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2146 // Note that num_htlcs should not include dust HTLCs.
2147 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2148 // Note that we need to divide before multiplying to round properly,
2149 // since the lowest denomination of bitcoin on-chain is the satoshi.
2150 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant entity.
/// A channel, wrapping the shared per-channel state in [`ChannelContext`] and exposing the
/// message handlers and channel operations defined on it below.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All persistent channel state shared across channel phases lives in the context.
	pub context: ChannelContext<SP>,
#[cfg(any(test, fuzzing))]
/// Cached inputs of a previously-computed commitment transaction (test/fuzzing builds only).
/// NOTE(review): presumably used to detect stale cached commitment data when the HTLC set or
/// id counters have since moved — confirm against the use sites.
struct CommitmentTxInfoCached {
	// Number of pending HTLCs considered when this cache entry was computed.
	total_pending_htlcs: usize,
	// HTLC id counters at the time of caching; if these have advanced, the entry is stale.
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
2168 impl<SP: Deref> Channel<SP> where
2169 SP::Target: SignerProvider,
2170 <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
	/// Validates a counterparty-proposed commitment feerate against our fee estimator's bounds.
	///
	/// `cur_feerate_per_kw`, when provided, is the feerate currently in force: per the log
	/// message below, a proposal below our lower bound but above the current feerate is still
	/// accepted. Returns a `ChannelError::Close` when the feerate is unacceptably high or low.
	fn check_remote_fee<F: Deref, L: Deref>(
		channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
		feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
	) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
		// We only bound the fee updates on the upper side to prevent completely absurd feerates,
		// always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
		// We generally don't care too much if they set the feerate to something very high, but it
		// could result in the channel being useless due to everything being dust. This doesn't
		// apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
		// zero fee, so their fee is no longer considered to determine dust limits.
		if !channel_type.supports_anchors_zero_fee_htlc_tx() {
			fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
			if feerate_per_kw as u64 > upper_limit {
				return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
		// Anchor channels use a separate (lower) floor since HTLC txs carry no fee themselves.
		let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
			ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
		if feerate_per_kw < lower_limit {
			if let Some(cur_feerate) = cur_feerate_per_kw {
				if feerate_per_kw > cur_feerate {
					"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
					cur_feerate, feerate_per_kw);
			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
	/// Returns the holder's shutdown scriptpubkey as a raw `Script`.
	///
	/// # Panics
	/// Panics (via `unwrap`) if `shutdown_scriptpubkey` has not been set yet.
	fn get_closing_scriptpubkey(&self) -> Script {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will fail.
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	/// Estimates the weight of the cooperative-close transaction, assuming worst-case (71-byte)
	/// signatures. Each `Some(script)` argument contributes one output's weight; passing `None`
	/// omits that output entirely.
	fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
		1 + // script length (0)
		)*4 + // * 4 for non-witness parts
		2 + // witness marker and flag
		1 + // witness element count
		4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
		self.context.get_funding_redeemscript().len() as u64 + // funding witness script
		2*(1 + 71); // two signatures + sighash type flags
		if let Some(spk) = a_scriptpubkey {
			ret += ((8+1) + // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		if let Some(spk) = b_scriptpubkey {
			ret += ((8+1) + // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
	/// Builds the cooperative-close transaction for the current channel balances.
	///
	/// Returns the transaction plus the total fee actually paid, which may exceed
	/// `proposed_total_fee_satoshis` when the fee-payer's balance cannot cover it (the shortfall
	/// is added to the fee). Outputs at or below the holder dust limit are pruned, and
	/// `skip_remote_output` drops the counterparty output unconditionally.
	///
	/// May only be called once all pending HTLCs and fee updates have been resolved (asserted).
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());
		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		// Only the channel funder (the outbound side) pays the closing fee.
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	/// Returns the channel's funding outpoint.
	///
	/// # Panics
	/// Panics if the funding outpoint has not yet been set in the channel transaction parameters.
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
	(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see equivalent if condition there).
		assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		// Roll the update id back: the dropped update's effect is applied out-of-band instead.
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
	/// Marks an inbound HTLC as fulfilled with the given preimage and builds the resulting
	/// [`ChannelMonitorUpdate`].
	///
	/// The preimage is always pushed into the monitor update (so the HTLC can be claimed
	/// on-chain even if the channel closes first), but the `update_fulfill_htlc` message itself
	/// is deferred to the holding cell when we currently cannot send commitment updates.
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted in an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.
		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				InboundHTLCState::Committed => {},
				InboundHTLCState::LocalRemoved(ref reason) => {
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
					return UpdateFulfillFetch::DuplicateClaim {};
					debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
					// Don't return in release mode here so that we can update channel_monitor
				htlc_value_msat = htlc.amount_msat;
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		// Now update local state:
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
		let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
		if let InboundHTLCState::Committed = htlc.state {
			debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
		htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		UpdateFulfillFetch::NewClaim {
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
	/// Fulfills an HTLC via `get_update_fulfill_htlc` and, when a fulfill message was produced,
	/// immediately builds the accompanying commitment update, merging both into a single
	/// [`ChannelMonitorUpdate`] (or queueing the commitment update behind blocked updates).
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					// Shift every already-queued update's id up to keep ids strictly increasing.
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
					let update = self.build_commitment_no_status_check(logger);
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2467 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2468 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2469 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2470 /// before we fail backwards.
2472 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2473 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2474 /// [`ChannelError::Ignore`].
2475 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2476 -> Result<(), ChannelError> where L::Target: Logger {
2477 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2478 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	///
	/// `force_holding_cell` forces the failure into the holding cell even when we could send the
	/// `update_fail_htlc` immediately; it is also set internally when commitment updates are
	/// currently blocked (see the channel_state check below).
	fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
	-> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.
		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				InboundHTLCState::Committed => {},
				InboundHTLCState::LocalRemoved(ref reason) => {
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						debug_assert!(false, "Tried to fail an HTLC that was already failed");
					debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
					return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		// Now update local state:
		if force_holding_cell {
			// Check for a conflicting resolution already queued for this same HTLC id.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
				htlc_id: htlc_id_arg,
		log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
		let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
		htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
		Ok(Some(msgs::UpdateFailHTLC {
			channel_id: self.context.channel_id(),
			htlc_id: htlc_id_arg,
2574 // Message handlers:
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	///
	/// Verifies the counterparty's signature on our initial commitment transaction, builds and
	/// returns a fresh [`ChannelMonitor`] (seeded with the initial counterparty commitment tx)
	/// for the caller to persist, and advances the channel state to `FundingSent` (plus
	/// `WaitingForBatch` for batch-funded channels).
	pub fn funding_signed<L: Deref>(
		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
		if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
			self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
			self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		let funding_script = self.context.get_funding_redeemscript();
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		let trusted_tx = initial_commitment_tx.trust();
		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
		// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
		if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
		// Assemble everything the new ChannelMonitor needs to watch this channel on-chain.
		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
			self.context.channel_state = ChannelState::FundingSent as u32;
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;
		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
		let need_channel_ready = self.check_get_channel_ready(0).is_some();
		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2669 /// Updates the state of the channel to indicate that all channels in the batch have received
2670 /// funding_signed and persisted their monitors.
2671 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2672 /// treated as a non-batch channel going forward.
2673 pub fn set_batch_ready(&mut self) {
2674 self.context.is_batch_funding = None;
2675 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	///
	/// Also records the counterparty's new per-commitment point and any fresh SCID alias they
	/// provided, and validates re-sent `channel_ready`s against the expected commitment point.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
		NS::Target: NodeSigner,
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			// Stash the message so it can be replayed after the expected channel_reestablish.
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
		non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
		if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
			self.context.channel_state |= ChannelState::TheirChannelReady as u32;
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// Both sides are now ready; the channel becomes operational.
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
		} else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
			// If we reconnected before sending our `channel_ready` they may still resend theirs:
			(self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
			(ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
						&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
/// Handles an inbound `update_add_htlc` message from our counterparty.
///
/// Validates the HTLC against the current channel state and our limits (shutdown state,
/// amount bounds, max-accepted-HTLC and in-flight-value caps, dust-exposure limits, and
/// both sides' channel-reserve / fee affordability) and, on success, pushes it onto
/// `pending_inbound_htlcs` in `InboundHTLCState::RemoteAnnounced`.
///
/// `pending_forward_status` is the status the HTLC would otherwise take. For "soft"
/// failures (dust exposure, fee spike buffer, local shutdown already sent) it is
/// downgraded to a failure via `create_pending_htlc_status` — the `u16` passed is the
/// failure code (e.g. `0x4000|8`, `0x1000|7`) — rather than erroring. Hard protocol
/// violations return `ChannelError::Close`.
2756 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2757 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2758 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2759 ) -> Result<(), ChannelError>
2760 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2761 FE::Target: FeeEstimator, L::Target: Logger,
2763 // We can't accept HTLCs sent after we've sent a shutdown.
2764 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2765 if local_sent_shutdown {
// 0x4000|8 = permanent_channel_failure: soft-fail the HTLC rather than closing.
2766 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2768 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2769 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2770 if remote_sent_shutdown {
2771 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2773 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2774 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
// Basic amount sanity: cannot exceed channel value, cannot be zero, must meet our minimum.
2776 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2777 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2779 if msg.amount_msat == 0 {
2780 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2782 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2783 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce the count/value caps we advertised in our channel parameters.
2786 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2787 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2788 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2789 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2791 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2792 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2795 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2796 // the reserve_satoshis we told them to always have as direct payment so that they lose
2797 // something if we punish them for broadcasting an old state).
2798 // Note that we don't really care about having a small/no to_remote output in our local
2799 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2800 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2801 // present in the next commitment transaction we send them (at least for fulfilled ones,
2802 // failed ones won't modify value_to_self).
2803 // Note that we will send HTLCs which another instance of rust-lightning would think
2804 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2805 // Channel state once they will not be present in the next received commitment
2807 let mut removed_outbound_total_msat = 0;
2808 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2809 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2810 removed_outbound_total_msat += htlc.amount_msat;
2811 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2812 removed_outbound_total_msat += htlc.amount_msat;
// Dust exposure: with anchors the HTLC-tx fee is zero, otherwise weight * buffer feerate.
2816 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2817 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2820 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2821 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2822 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2824 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2825 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2826 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2827 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2828 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2829 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
// 0x1000|7 = temporary_channel_failure: soft-fail over-exposure rather than closing.
2830 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2834 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2835 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2836 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2837 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2838 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2839 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2840 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Compute what the remote balance would be after this HTLC; they must be able to cover it.
2844 let pending_value_to_self_msat =
2845 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2846 let pending_remote_value_msat =
2847 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2848 if pending_remote_value_msat < msg.amount_msat {
2849 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2852 // Check that the remote can afford to pay for this HTLC on-chain at the current
2853 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2855 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2856 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2857 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
// With anchors, both anchor outputs come out of the funder's balance as well.
2859 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2860 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2864 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2865 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2867 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2868 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
2872 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2873 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2877 if !self.context.is_outbound() {
2878 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
2879 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
2880 // side, only on the sender's. Note that with anchor outputs we are no longer as
2881 // sensitive to fee spikes, so we need to account for them.
2882 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2883 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2884 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2885 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2887 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
2888 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2889 // the HTLC, i.e. its status is already set to failing.
2890 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
2891 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2894 // Check that they won't violate our local required channel reserve by adding this HTLC.
2895 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2896 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2897 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
2898 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC ids must be assigned sequentially; a skipped id is a protocol violation.
2901 if self.context.next_counterparty_htlc_id != msg.htlc_id {
2902 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
// Values >= 500_000_000 are interpreted as UNIX timestamps, not block heights.
2904 if msg.cltv_expiry >= 500000000 {
2905 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
2908 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
2909 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
2910 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
2914 // Now update local state:
2915 self.context.next_counterparty_htlc_id += 1;
2916 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
2917 htlc_id: msg.htlc_id,
2918 amount_msat: msg.amount_msat,
2919 payment_hash: msg.payment_hash,
2920 cltv_expiry: msg.cltv_expiry,
2921 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
2926 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
/// from the counterparty, transitioning it from `Committed` to `RemoteRemoved`.
///
/// Exactly one of `check_preimage` (fulfill) or `fail_reason` (fail/malformed) must be
/// provided. On fulfill, the preimage is checked against the HTLC's payment hash before
/// the state change. Returns a reference to the updated HTLC, or `ChannelError::Close`
/// if the id is unknown, the preimage is wrong, or the HTLC is in a state where removal
/// is not allowed (not yet committed, or already removed).
2928 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
2929 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
2930 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
2931 if htlc.htlc_id == htlc_id {
2932 let outcome = match check_preimage {
// No preimage: this is a fail/malformed; convert the failure reason into the outcome.
2933 None => fail_reason.into(),
2934 Some(payment_preimage) => {
// Verify SHA256(preimage) matches the payment hash we offered.
2935 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
2936 if payment_hash != htlc.payment_hash {
2937 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
2939 OutboundHTLCOutcome::Success(Some(payment_preimage))
// Only a `Committed` HTLC may be removed by the counterparty.
2943 OutboundHTLCState::LocalAnnounced(_) =>
2944 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
2945 OutboundHTLCState::Committed => {
2946 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
2948 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
2949 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
// Fell through the loop: no pending outbound HTLC has this id.
2954 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
/// Handles an inbound `update_fulfill_htlc`: verifies channel state and the preimage,
/// marks the HTLC `RemoteRemoved`, and returns its source and amount so the claim can
/// be propagated backwards. Errors with `ChannelError::Close` on protocol violations.
2957 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2958 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2959 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2961 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2962 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
// Preimage validation and the state transition live in mark_outbound_htlc_removed.
2965 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
/// Handles an inbound `update_fail_htlc`: verifies channel state and marks the outbound
/// HTLC `RemoteRemoved` with the given failure reason. Errors with `ChannelError::Close`
/// on protocol violations.
2968 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2969 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2970 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
2972 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2973 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
2976 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `update_fail_malformed_htlc`: same state checks and transition as
/// `update_fail_htlc`, with the failure reason derived from the malformed-HTLC message.
2980 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2981 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2982 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
2984 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2985 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
2988 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `commitment_signed` message.
///
/// Rebuilds our holder commitment transaction for the current commitment number,
/// verifies the counterparty's commitment signature and each non-dust HTLC signature
/// against it, validates the result with our signer, then advances HTLC and fee-update
/// state and emits a `ChannelMonitorUpdate` carrying the new holder commitment. Returns
/// the (possibly blocked) monitor update, or `ChannelError::Close` if any signature or
/// state check fails.
2992 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
2993 where L::Target: Logger
2995 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2996 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
2998 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2999 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3001 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3002 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
// Rebuild our local commitment tx and verify the counterparty's signature over it.
3005 let funding_script = self.context.get_funding_redeemscript();
3007 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3009 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3010 let commitment_txid = {
3011 let trusted_tx = commitment_stats.tx.trust();
3012 let bitcoin_tx = trusted_tx.built_transaction();
3013 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3015 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3016 log_bytes!(msg.signature.serialize_compact()[..]),
3017 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3018 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3019 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3020 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3024 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3026 // If our counterparty updated the channel fee in this commitment transaction, check that
3027 // they can actually afford the new fee now.
3028 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3029 update_state == FeeUpdateState::RemoteAnnounced
// Only the funder (them, since we're not outbound) pays the fee; they must keep their reserve.
3032 debug_assert!(!self.context.is_outbound());
3033 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3034 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3035 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
// Test/fuzz-only: cross-check the cached projected fee against the actual built tx.
3038 #[cfg(any(test, fuzzing))]
3040 if self.context.is_outbound() {
3041 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3042 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3043 if let Some(info) = projected_commit_tx_info {
3044 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3045 + self.context.holding_cell_htlc_updates.len();
3046 if info.total_pending_htlcs == total_pending_htlcs
3047 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3048 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3049 && info.feerate == self.context.feerate_per_kw {
3050 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3056 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3057 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3060 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3061 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3062 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3063 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3064 // backwards compatibility, we never use it in production. To provide test coverage, here,
3065 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3066 #[allow(unused_assignments, unused_mut)]
3067 let mut separate_nondust_htlc_sources = false;
3068 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3069 use core::hash::{BuildHasher, Hasher};
3070 // Get a random value using the only std API to do so - the DefaultHasher
3071 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3072 separate_nondust_htlc_sources = rand_val % 2 == 0;
// Verify each non-dust HTLC's signature against its second-stage HTLC transaction.
3075 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3076 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3077 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
// `transaction_output_index` is Some only for non-dust HTLCs (they have a tx output).
3078 if let Some(_) = htlc.transaction_output_index {
3079 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3080 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3081 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3083 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3084 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3085 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3086 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3087 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
3088 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3089 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
3090 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3092 if !separate_nondust_htlc_sources {
3093 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3096 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3098 if separate_nondust_htlc_sources {
3099 if let Some(source) = source_opt.take() {
3100 nondust_htlc_sources.push(source);
3103 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3106 let holder_commitment_tx = HolderCommitmentTransaction::new(
3107 commitment_stats.tx,
3109 msg.htlc_signatures.clone(),
3110 &self.context.get_holder_pubkeys().funding_pubkey,
3111 self.context.counterparty_funding_pubkey()
3114 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3115 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3117 // Update state now that we've passed all the can-fail calls...
3118 let mut need_commitment = false;
3119 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3120 if *update_state == FeeUpdateState::RemoteAnnounced {
3121 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3122 need_commitment = true;
// Advance inbound HTLCs the remote announced: they are now committed on our side.
3126 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3127 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3128 Some(forward_info.clone())
3130 if let Some(forward_info) = new_forward {
3131 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3132 &htlc.payment_hash, &self.context.channel_id);
3133 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3134 need_commitment = true;
// Advance outbound HTLCs the remote removed, collecting preimages of claimed ones.
3137 let mut claimed_htlcs = Vec::new();
3138 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3139 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3140 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3141 &htlc.payment_hash, &self.context.channel_id);
3142 // Grab the preimage, if it exists, instead of cloning
3143 let mut reason = OutboundHTLCOutcome::Success(None);
3144 mem::swap(outcome, &mut reason);
3145 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3146 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3147 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3148 // have a `Success(None)` reason. In this case we could forget some HTLC
3149 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3150 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3152 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3154 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3155 need_commitment = true;
// Record the new holder commitment in a monitor update.
3159 self.context.latest_monitor_update_id += 1;
3160 let mut monitor_update = ChannelMonitorUpdate {
3161 update_id: self.context.latest_monitor_update_id,
3162 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3163 commitment_tx: holder_commitment_tx,
3164 htlc_outputs: htlcs_and_sigs,
3166 nondust_htlc_sources,
3170 self.context.cur_holder_commitment_transaction_number -= 1;
3171 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3172 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3173 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3175 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3176 // In case we initially failed monitor updating without requiring a response, we need
3177 // to make sure the RAA gets sent first.
3178 self.context.monitor_pending_revoke_and_ack = true;
3179 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3180 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3181 // the corresponding HTLC status updates so that
3182 // get_last_commitment_update_for_send includes the right HTLCs.
3183 self.context.monitor_pending_commitment_signed = true;
3184 let mut additional_update = self.build_commitment_no_status_check(logger);
3185 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3186 // strictly increasing by one, so decrement it here.
3187 self.context.latest_monitor_update_id = monitor_update.update_id;
3188 monitor_update.updates.append(&mut additional_update.updates);
3190 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3191 &self.context.channel_id);
3192 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3195 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3196 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3197 // we'll send one right away when we get the revoke_and_ack when we
3198 // free_holding_cell_htlcs().
3199 let mut additional_update = self.build_commitment_no_status_check(logger);
3200 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3201 // strictly increasing by one, so decrement it here.
3202 self.context.latest_monitor_update_id = monitor_update.update_id;
3203 monitor_update.updates.append(&mut additional_update.updates);
3207 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3208 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3209 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3210 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3213 /// Public version of the below, checking relevant preconditions first.
3214 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3215 /// returns `(None, Vec::new())`.
///
/// Preconditions checked: the channel must have reached at least `ChannelReady` and must
/// not be awaiting a remote revoke, disconnected, or mid-monitor-update.
3216 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3217 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3218 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3219 where F::Target: FeeEstimator, L::Target: Logger
// Mask out transient flags before comparing against the ChannelReady ordering.
3221 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3222 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3223 self.free_holding_cell_htlcs(fee_estimator, logger)
3224 } else { (None, Vec::new()) }
3227 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3228 /// for our counterparty.
3229 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3230 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3231 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3232 where F::Target: FeeEstimator, L::Target: Logger
3234 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3235 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3236 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3237 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3239 let mut monitor_update = ChannelMonitorUpdate {
3240 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3241 updates: Vec::new(),
3244 let mut htlc_updates = Vec::new();
3245 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3246 let mut update_add_count = 0;
3247 let mut update_fulfill_count = 0;
3248 let mut update_fail_count = 0;
3249 let mut htlcs_to_fail = Vec::new();
3250 for htlc_update in htlc_updates.drain(..) {
3251 // Note that this *can* fail, though it should be due to rather-rare conditions on
3252 // fee races with adding too many outputs which push our total payments just over
3253 // the limit. In case it's less rare than I anticipate, we may want to revisit
3254 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3255 // to rebalance channels.
3256 match &htlc_update {
3257 &HTLCUpdateAwaitingACK::AddHTLC {
3258 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3259 skimmed_fee_msat, ..
3261 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3262 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3264 Ok(_) => update_add_count += 1,
3267 ChannelError::Ignore(ref msg) => {
3268 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3269 // If we fail to send here, then this HTLC should
3270 // be failed backwards. Failing to send here
3271 // indicates that this HTLC may keep being put back
3272 // into the holding cell without ever being
3273 // successfully forwarded/failed/fulfilled, causing
3274 // our counterparty to eventually close on us.
3275 htlcs_to_fail.push((source.clone(), *payment_hash));
3278 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3284 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3285 // If an HTLC claim was previously added to the holding cell (via
3286 // `get_update_fulfill_htlc`, then generating the claim message itself must
3287 // not fail - any in between attempts to claim the HTLC will have resulted
3288 // in it hitting the holding cell again and we cannot change the state of a
3289 // holding cell HTLC from fulfill to anything else.
3290 let mut additional_monitor_update =
3291 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3292 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3293 { monitor_update } else { unreachable!() };
3294 update_fulfill_count += 1;
3295 monitor_update.updates.append(&mut additional_monitor_update.updates);
3297 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3298 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3299 Ok(update_fail_msg_option) => {
3300 // If an HTLC failure was previously added to the holding cell (via
3301 // `queue_fail_htlc`) then generating the fail message itself must
3302 // not fail - we should never end up in a state where we double-fail
3303 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3304 // for a full revocation before failing.
3305 debug_assert!(update_fail_msg_option.is_some());
3306 update_fail_count += 1;
3309 if let ChannelError::Ignore(_) = e {}
3311 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3318 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3319 return (None, htlcs_to_fail);
3321 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3322 self.send_update_fee(feerate, false, fee_estimator, logger)
3327 let mut additional_update = self.build_commitment_no_status_check(logger);
3328 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3329 // but we want them to be strictly increasing by one, so reset it here.
3330 self.context.latest_monitor_update_id = monitor_update.update_id;
3331 monitor_update.updates.append(&mut additional_update.updates);
3333 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3334 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3335 update_add_count, update_fulfill_count, update_fail_count);
3337 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3338 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3344 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3345 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3346 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3347 /// generating an appropriate error *after* the channel state has been updated based on the
3348 /// revoke_and_ack message.
/// Returns the `(HTLCSource, PaymentHash)` pairs of HTLCs which must now be failed backwards,
/// plus the [`ChannelMonitorUpdate`] to persist — `None` when the update was queued as blocked
/// (either because `blocked_monitor_updates` is non-empty or `hold_mon_update` was set).
3349 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3350 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3351 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3352 where F::Target: FeeEstimator, L::Target: Logger,
// An RAA is only valid on an operational channel: reject it if the channel isn't ready,
// if the peer should be doing a channel_reestablish dance instead, or if we've already
// moved on to exchanging closing_signed messages.
3354 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3355 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3357 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3358 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3360 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3361 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
// Check that the revealed per_commitment_secret is a valid secp256k1 scalar and that it
// actually derives the per-commitment point the counterparty previously gave us.
3364 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3366 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3367 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3368 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3372 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3373 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3374 // haven't given them a new commitment transaction to broadcast). We should probably
3375 // take advantage of this by updating our channel monitor, sending them an error, and
3376 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3377 // lot of work, and there's some chance this is all a misunderstanding anyway.
3378 // We have to do *something*, though, since our signer may get mad at us for otherwise
3379 // jumping a remote commitment number, so best to just force-close and move on.
3380 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
// (test/fuzzing only) The cached commitment-tx fee info is now stale; drop it.
3383 #[cfg(any(test, fuzzing))]
3385 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3386 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
// Let the signer confirm the revocation before we commit any state changes.
3389 match &self.context.holder_signer {
3390 ChannelSignerType::Ecdsa(ecdsa) => {
3391 ecdsa.validate_counterparty_revocation(
3392 self.context.cur_counterparty_commitment_transaction_number + 1,
3394 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
// Record the revoked secret in our secret store (this also verifies it is consistent with
// the compact secret-chain derivation) and stage a monitor update carrying it.
3398 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3399 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3400 self.context.latest_monitor_update_id += 1;
3401 let mut monitor_update = ChannelMonitorUpdate {
3402 update_id: self.context.latest_monitor_update_id,
3403 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3404 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3405 secret: msg.per_commitment_secret,
3409 // Update state now that we've passed all the can-fail calls...
3410 // (note that we may still fail to generate the new commitment_signed message, but that's
3411 // OK, we step the channel here and *then* if the new generation fails we can fail the
3412 // channel based on that, but stepping stuff here should be safe either way.
3413 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3414 self.context.sent_message_awaiting_response = None;
// Advance the counterparty commitment point chain: their "current" point becomes "previous"
// and the freshly-supplied next_per_commitment_point becomes "current". Note that the
// commitment transaction number counts *down* as the channel advances.
3415 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3416 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3417 self.context.cur_counterparty_commitment_transaction_number -= 1;
3419 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3420 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3423 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3424 let mut to_forward_infos = Vec::new();
3425 let mut revoked_htlcs = Vec::new();
3426 let mut finalized_claimed_htlcs = Vec::new();
3427 let mut update_fail_htlcs = Vec::new();
3428 let mut update_fail_malformed_htlcs = Vec::new();
3429 let mut require_commitment = false;
// Running delta applied to value_to_self_msat once all HTLC removals are tallied below.
3430 let mut value_to_self_msat_diff: i64 = 0;
3433 // Take references explicitly so that we can hold multiple references to self.context.
3434 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3435 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3437 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
// Pass 1a: fully remove inbound HTLCs the peer has now irrevocably acknowledged we removed.
// A fulfilled inbound HTLC means we were paid, so credit its amount to our balance.
3438 pending_inbound_htlcs.retain(|htlc| {
3439 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3440 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3441 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3442 value_to_self_msat_diff += htlc.amount_msat as i64;
// Pass 1b: fully remove outbound HTLCs whose removal is now irrevocable. Failures are
// collected for failing backwards; fulfills are finalized and debited from our balance.
3447 pending_outbound_htlcs.retain(|htlc| {
3448 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3449 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3450 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3451 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3453 finalized_claimed_htlcs.push(htlc.source.clone());
3454 // They fulfilled, so we sent them money
3455 value_to_self_msat_diff -= htlc.amount_msat as i64;
// Pass 2: promote remaining inbound HTLCs one step along their state machine now that
// the RAA has arrived. mem::swap lets us move the forward_info out of the old state.
3460 for htlc in pending_inbound_htlcs.iter_mut() {
3461 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3463 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3467 let mut state = InboundHTLCState::Committed;
3468 mem::swap(&mut state, &mut htlc.state);
3470 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3471 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3472 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3473 require_commitment = true;
3474 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3475 match forward_info {
3476 PendingHTLCStatus::Fail(fail_msg) => {
3477 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3478 require_commitment = true;
3480 HTLCFailureMsg::Relay(msg) => {
3481 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3482 update_fail_htlcs.push(msg)
3484 HTLCFailureMsg::Malformed(msg) => {
3485 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3486 update_fail_malformed_htlcs.push(msg)
3490 PendingHTLCStatus::Forward(forward_info) => {
3491 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3492 to_forward_infos.push((forward_info, htlc.htlc_id));
3493 htlc.state = InboundHTLCState::Committed;
// Pass 3: promote outbound HTLCs — locally-announced ones are now Committed, and ones
// awaiting this revoke move to AwaitingRemovedRemoteRevoke (requiring a new commitment).
3499 for htlc in pending_outbound_htlcs.iter_mut() {
3500 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3501 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3502 htlc.state = OutboundHTLCState::Committed;
3504 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3505 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3506 // Grab the preimage, if it exists, instead of cloning
3507 let mut reason = OutboundHTLCOutcome::Success(None);
3508 mem::swap(outcome, &mut reason);
3509 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3510 require_commitment = true;
// Apply the net balance change from all HTLC removals tallied above.
3514 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
// Step any pending fee update along its state machine as well. Outbound fee updates
// commit immediately on RAA; inbound ones that were awaiting this revoke now commit and
// require a fresh commitment transaction.
3516 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3517 match update_state {
3518 FeeUpdateState::Outbound => {
3519 debug_assert!(self.context.is_outbound());
3520 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3521 self.context.feerate_per_kw = feerate;
3522 self.context.pending_update_fee = None;
3524 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3525 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3526 debug_assert!(!self.context.is_outbound());
3527 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3528 require_commitment = true;
3529 self.context.feerate_per_kw = feerate;
3530 self.context.pending_update_fee = None;
// Decide whether the monitor update can be handed to the caller now or must be queued:
// it is queued (returned as None) if earlier updates are already blocked or the caller
// explicitly asked us to hold it.
3535 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3536 let release_state_str =
3537 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3538 macro_rules! return_with_htlcs_to_fail {
3539 ($htlcs_to_fail: expr) => {
3540 if !release_monitor {
3541 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3542 update: monitor_update,
3544 return Ok(($htlcs_to_fail, None));
3546 return Ok(($htlcs_to_fail, Some(monitor_update)));
3551 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3552 // We can't actually generate a new commitment transaction (incl by freeing holding
3553 // cells) while we can't update the monitor, so we just return what we have.
3554 if require_commitment {
3555 self.context.monitor_pending_commitment_signed = true;
3556 // When the monitor updating is restored we'll call
3557 // get_last_commitment_update_for_send(), which does not update state, but we're
3558 // definitely now awaiting a remote revoke before we can step forward any more, so
3560 let mut additional_update = self.build_commitment_no_status_check(logger);
3561 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3562 // strictly increasing by one, so decrement it here.
3563 self.context.latest_monitor_update_id = monitor_update.update_id;
3564 monitor_update.updates.append(&mut additional_update.updates);
// Stash forwards/fails/finalized-fulfills to be surfaced when the monitor update
// completes (see monitor_updating_restored).
3566 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3567 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3568 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3569 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3570 return_with_htlcs_to_fail!(Vec::new());
// Now that the peer has revoked, try to flush anything we stashed in the holding cell.
3573 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3574 (Some(mut additional_update), htlcs_to_fail) => {
3575 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3576 // strictly increasing by one, so decrement it here.
3577 self.context.latest_monitor_update_id = monitor_update.update_id;
3578 monitor_update.updates.append(&mut additional_update.updates);
3580 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3581 &self.context.channel_id(), release_state_str);
3583 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3584 return_with_htlcs_to_fail!(htlcs_to_fail);
3586 (None, htlcs_to_fail) => {
3587 if require_commitment {
3588 let mut additional_update = self.build_commitment_no_status_check(logger);
3590 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3591 // strictly increasing by one, so decrement it here.
3592 self.context.latest_monitor_update_id = monitor_update.update_id;
3593 monitor_update.updates.append(&mut additional_update.updates);
3595 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3596 &self.context.channel_id(),
3597 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3600 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3601 return_with_htlcs_to_fail!(htlcs_to_fail);
3603 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3604 &self.context.channel_id(), release_state_str);
3606 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3607 return_with_htlcs_to_fail!(htlcs_to_fail);
3613 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3614 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3615 /// commitment update.
3616 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3617 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3618 where F::Target: FeeEstimator, L::Target: Logger
// We pass `force_holding_cell = true`, so send_update_fee must stash the fee update in
// the holding cell and return no message; the assert enforces that invariant.
3620 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3621 assert!(msg_opt.is_none(), "We forced holding cell?");
3624 /// Adds a pending update to this channel. See the doc for send_htlc for
3625 /// further details on the optionness of the return value.
3626 /// If our balance is too low to cover the cost of the next commitment transaction at the
3627 /// new feerate, the update is cancelled.
3629 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3630 /// [`Channel`] if `force_holding_cell` is false.
3631 fn send_update_fee<F: Deref, L: Deref>(
3632 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3633 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3634 ) -> Option<msgs::UpdateFee>
3635 where F::Target: FeeEstimator, L::Target: Logger
// Only the outbound (funding/fee-paying) side may initiate update_fee, and only while the
// channel is usable and live — violations here are caller bugs, hence panics.
3637 if !self.context.is_outbound() {
3638 panic!("Cannot send fee from inbound channel");
3640 if !self.context.is_usable() {
3641 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3643 if !self.context.is_live() {
3644 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3647 // Before proposing a feerate update, check that we can actually afford the new fee.
3648 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3649 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3650 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3651 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// Budget for the commitment fee at the new rate, including headroom for holding-cell HTLCs
// and CONCURRENT_INBOUND_HTLC_FEE_BUFFER extra HTLCs the counterparty may add in flight.
3652 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3653 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
// We must retain the counterparty-selected reserve on top of the buffered fee.
3654 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3655 //TODO: auto-close after a number of failures?
3656 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3660 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3661 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3662 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3663 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
// Abort if the new feerate would push dust-HTLC exposure past our limit on either
// commitment transaction.
3664 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3665 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3668 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3669 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// If we're waiting on the peer's revoke_and_ack or on a monitor update, we cannot send
// new updates now — defer into the holding cell instead.
3673 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3674 force_holding_cell = true;
3677 if force_holding_cell {
3678 self.context.holding_cell_update_fee = Some(feerate_per_kw);
// Otherwise stage the fee update as pending-outbound and emit the update_fee message.
3682 debug_assert!(self.context.pending_update_fee.is_none());
3683 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3685 Some(msgs::UpdateFee {
3686 channel_id: self.context.channel_id,
3691 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3692 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3694 /// No further message handling calls may be made until a channel_reestablish dance has
3696 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3697 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3698 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3699 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3703 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3704 // While the below code should be idempotent, it's simpler to just return early, as
3705 // redundant disconnect events can fire, though they should be rare.
// Roll announcement_sigs back to NotSent — they will be re-exchanged on reconnect.
3709 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3710 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3713 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3714 // will be retransmitted.
3715 self.context.last_sent_closing_fee = None;
3716 self.context.pending_counterparty_closing_signed = None;
3717 self.context.closing_fee_limits = None;
3719 let mut inbound_drop_count = 0;
3720 self.context.pending_inbound_htlcs.retain(|htlc| {
3722 InboundHTLCState::RemoteAnnounced(_) => {
3723 // They sent us an update_add_htlc but we never got the commitment_signed.
3724 // We'll tell them what commitment_signed we're expecting next and they'll drop
3725 // this HTLC accordingly
3726 inbound_drop_count += 1;
3729 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3730 // We received a commitment_signed updating this HTLC and (at least hopefully)
3731 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3732 // in response to it yet, so don't touch it.
3735 InboundHTLCState::Committed => true,
3736 InboundHTLCState::LocalRemoved(_) => {
3737 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3738 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3739 // (that we missed). Keep this around for now and if they tell us they missed
3740 // the commitment_signed we can re-transmit the update then.
// Rewind the counterparty HTLC-id counter by the number of dropped un-committed inbound
// HTLCs so that the ids stay in sync when they are re-sent after reestablish.
3745 self.context.next_counterparty_htlc_id -= inbound_drop_count;
// A fee update the peer announced but never committed is simply forgotten; they will
// re-send it after reconnection if they still want it.
3747 if let Some((_, update_state)) = self.context.pending_update_fee {
3748 if update_state == FeeUpdateState::RemoteAnnounced {
3749 debug_assert!(!self.context.is_outbound());
3750 self.context.pending_update_fee = None;
3754 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3755 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3756 // They sent us an update to remove this but haven't yet sent the corresponding
3757 // commitment_signed, we need to move it back to Committed and they can re-send
3758 // the update upon reconnection.
3759 htlc.state = OutboundHTLCState::Committed;
// Finally, mark the channel paused until a channel_reestablish completes.
3763 self.context.sent_message_awaiting_response = None;
3765 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3766 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3770 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3771 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3772 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3773 /// update completes (potentially immediately).
3774 /// The messages which were generated with the monitor update must *not* have been sent to the
3775 /// remote end, and must instead have been dropped. They will be regenerated when
3776 /// [`Self::monitor_updating_restored`] is called.
3778 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3779 /// [`chain::Watch`]: crate::chain::Watch
3780 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3781 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3782 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3783 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3784 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// Accumulate (|=) resend obligations rather than overwriting them — multiple pauses may
// stack before the monitor update finally completes.
3786 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3787 self.context.monitor_pending_commitment_signed |= resend_commitment;
3788 self.context.monitor_pending_channel_ready |= resend_channel_ready;
// Queue up the HTLC events to be surfaced once monitor persistence completes.
3789 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3790 self.context.monitor_pending_failures.append(&mut pending_fails);
3791 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3792 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3795 /// Indicates that the latest ChannelMonitor update has been committed by the client
3796 /// successfully and we should restore normal operation. Returns messages which should be sent
3797 /// to the remote side.
3798 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3799 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3800 user_config: &UserConfig, best_block_height: u32
3801 ) -> MonitorRestoreUpdates
3804 NS::Target: NodeSigner
// Must only be called while a monitor update is actually in progress; clear the flag now.
3806 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3807 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3809 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3810 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3811 // first received the funding_signed.
3812 let mut funding_broadcastable =
3813 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3814 self.context.funding_transaction.take()
3816 // That said, if the funding transaction is already confirmed (ie we're active with a
3817 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3818 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3819 funding_broadcastable = None;
3822 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3823 // (and we assume the user never directly broadcasts the funding transaction and waits for
3824 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3825 // * an inbound channel that failed to persist the monitor on funding_created and we got
3826 // the funding transaction confirmed before the monitor was persisted, or
3827 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3828 let channel_ready = if self.context.monitor_pending_channel_ready {
3829 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3830 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3831 self.context.monitor_pending_channel_ready = false;
3832 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3833 Some(msgs::ChannelReady {
3834 channel_id: self.context.channel_id(),
3835 next_per_commitment_point,
3836 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3840 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
// Drain the queued HTLC events (forwards, failures, finalized fulfills) accumulated while
// the monitor update was pending; mem::swap moves them out without cloning.
3842 let mut accepted_htlcs = Vec::new();
3843 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3844 let mut failed_htlcs = Vec::new();
3845 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3846 let mut finalized_claimed_htlcs = Vec::new();
3847 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
// If the peer is disconnected we can't send RAA/commitment messages now — drop those
// pending flags (the reestablish dance will regenerate whatever is needed) and return
// only the event-type updates.
3849 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3850 self.context.monitor_pending_revoke_and_ack = false;
3851 self.context.monitor_pending_commitment_signed = false;
3852 return MonitorRestoreUpdates {
3853 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3854 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
// Regenerate the RAA and/or commitment_signed we dropped when the update was paused.
3858 let raa = if self.context.monitor_pending_revoke_and_ack {
3859 Some(self.get_last_revoke_and_ack())
3861 let commitment_update = if self.context.monitor_pending_commitment_signed {
3862 self.get_last_commitment_update_for_send(logger).ok()
3864 if commitment_update.is_some() {
3865 self.mark_awaiting_response();
3868 self.context.monitor_pending_revoke_and_ack = false;
3869 self.context.monitor_pending_commitment_signed = false;
// The resend order was fixed when the messages were first generated; reuse it.
3870 let order = self.context.resend_order.clone();
3871 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
3872 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
3873 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
3874 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
3875 MonitorRestoreUpdates {
3876 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
/// Handles an inbound `update_fee` message from the counterparty (the channel funder),
/// validating the proposed feerate and recording it as a remote-announced pending fee update.
/// Returns a `ChannelError::Close` if the message is invalid in the current state or would
/// over-expose us to dust HTLCs.
3880 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
3881 where F::Target: FeeEstimator, L::Target: Logger
// Only the non-funding (inbound) side may receive update_fee, and not while disconnected.
3883 if self.context.is_outbound() {
3884 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
3886 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3887 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
// Sanity-check the proposed feerate against our estimator before accepting it.
3889 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
3890 let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
3892 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
3893 self.context.update_time_counter += 1;
3894 // If the feerate has increased over the previous dust buffer (note that
3895 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
3896 // won't be pushed over our dust exposure limit by the feerate increase.
3897 if feerate_over_dust_buffer {
3898 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3899 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3900 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3901 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3902 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3903 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3904 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
3905 msg.feerate_per_kw, holder_tx_dust_exposure)));
3907 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3908 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
3909 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
/// Builds the `revoke_and_ack` message for our current holder commitment state: reveals the
/// per-commitment secret for the commitment two back and commits to the next
/// per-commitment point, so the counterparty can retransmit-sync on reconnect.
fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
	let signer = self.context.holder_signer.as_ref();
	let commitment_number = self.context.cur_holder_commitment_transaction_number;
	// Reveal the secret for the now-revoked commitment (two behind the current one)...
	let per_commitment_secret = signer.release_commitment_secret(commitment_number + 2);
	// ...and hand over the point for the commitment we're moving to.
	let next_per_commitment_point =
		signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx);
	msgs::RevokeAndACK {
		channel_id: self.context.channel_id,
		per_commitment_secret,
		next_per_commitment_point,
		next_local_nonce: None,
/// Gets the last commitment update for immediate sending to our peer.
///
/// Regenerates every update message (`update_add_htlc`, `update_fulfill_htlc`,
/// `update_fail_htlc`, `update_fail_malformed_htlc`, and possibly `update_fee`) implied by our
/// current pending-HTLC state, plus a fresh `commitment_signed`. Returns `Err(())` if the
/// signer is not yet ready to produce the commitment signature, in which case
/// `signer_pending_commitment_update` is set so we retry later.
fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
	let mut update_add_htlcs = Vec::new();
	let mut update_fulfill_htlcs = Vec::new();
	let mut update_fail_htlcs = Vec::new();
	let mut update_fail_malformed_htlcs = Vec::new();
	// Re-announce any outbound HTLCs the counterparty has not yet committed to.
	for htlc in self.context.pending_outbound_htlcs.iter() {
		if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
			update_add_htlcs.push(msgs::UpdateAddHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc.htlc_id,
				amount_msat: htlc.amount_msat,
				payment_hash: htlc.payment_hash,
				cltv_expiry: htlc.cltv_expiry,
				onion_routing_packet: (**onion_packet).clone(),
				skimmed_fee_msat: htlc.skimmed_fee_msat,
	// Re-send removal messages for inbound HTLCs we've resolved locally but which the
	// counterparty hasn't yet committed to removing.
	for htlc in self.context.pending_inbound_htlcs.iter() {
		if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
			&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
				update_fail_htlcs.push(msgs::UpdateFailHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					reason: err_packet.clone()
			&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
				update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					sha256_of_onion: sha256_of_onion.clone(),
					failure_code: failure_code.clone(),
			&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
				update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					payment_preimage: payment_preimage.clone(),
	// Only the funder ever sends update_fee; re-send any still-pending fee update.
	let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id(),
			feerate_per_kw: self.context.pending_update_fee.unwrap().0,
	log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
		&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
		update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
	// Sign without touching channel state (we're re-sending, not advancing). On signer
	// success/failure, keep `signer_pending_commitment_update` consistent so the caller
	// knows whether a retry is needed.
	let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
		if self.context.signer_pending_commitment_update {
			log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
			self.context.signer_pending_commitment_update = false;
		if !self.context.signer_pending_commitment_update {
			log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
			self.context.signer_pending_commitment_update = true;
	Ok(msgs::CommitmentUpdate {
		update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4006 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4007 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4008 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4009 assert!(self.context.shutdown_scriptpubkey.is_some());
4010 Some(msgs::Shutdown {
4011 channel_id: self.context.channel_id,
4012 scriptpubkey: self.get_closing_scriptpubkey(),
/// Handles an inbound `channel_reestablish` message, re-synchronizing commitment state after
/// a reconnection and computing which messages (channel_ready, RAA, commitment update,
/// shutdown, announcement signatures) we must retransmit.
///
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
///
/// Some links printed in log lines are included here to check them during build (when run with
/// `cargo doc --document-private-items`):
/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
pub fn channel_reestablish<L: Deref, NS: Deref>(
	&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
	chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
) -> Result<ReestablishResponses, ChannelError>
	NS::Target: NodeSigner
	// channel_reestablish is only valid immediately after a reconnect.
	if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
		// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
		// almost certainly indicates we are going to end up out-of-sync in some way, so we
		// just close here instead of trying to recover.
		return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
	// Reject nonsensical commitment numbers (out of range, or a zero next_local which some
	// implementations use to force a non-standard close).
	if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
		msg.next_local_commitment_number == 0 {
		return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
	// Verify the peer's claimed last-revealed secret matches the per-commitment point we
	// actually handed out at that commitment height.
	if msg.next_remote_commitment_number > 0 {
		let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
		let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
			.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
		if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
			return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
		// The peer has proven knowledge of a commitment *ahead* of where we think we are:
		// we have lost state and must not broadcast. Log loudly and panic.
		if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
			macro_rules! log_and_panic {
				($err_msg: expr) => {
					log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
			log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
				This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
				More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
				If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
				ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
				ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
				Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
				See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
	// Before we change the state of the channel, we check if the peer is sending a very old
	// commitment transaction number, if yes we send a warning message.
	let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
	if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
	// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
	// remaining cases either succeed or ErrorMessage-fail).
	self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
	self.context.sent_message_awaiting_response = None;
	let shutdown_msg = self.get_outbound_shutdown();
	let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
	// Pre-channel_ready handling: nothing beyond (possibly) re-sending channel_ready itself.
	if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
		// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
		if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
				self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
			if msg.next_remote_commitment_number != 0 {
				return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
			// Short circuit the whole handler as there is nothing we can resend them
			return Ok(ReestablishResponses {
				channel_ready: None,
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
		// We have OurChannelReady set!
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		return Ok(ReestablishResponses {
			channel_ready: Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			raa: None, commitment_update: None,
			order: RAACommitmentOrder::CommitmentFirst,
			shutdown_msg, announcement_sigs,
	// Decide whether the peer is missing our last revoke_and_ack based on which of our
	// commitments they claim to have seen.
	let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
		// Remote isn't waiting on any RevokeAndACK from us!
		// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
	} else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
		if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
			// Can't re-sign while a monitor update is pending; queue the RAA instead.
			self.context.monitor_pending_revoke_and_ack = true;
			Some(self.get_last_revoke_and_ack())
		return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
	// We increment cur_counterparty_commitment_transaction_number only upon receipt of
	// revoke_and_ack, not on sending commitment_signed, so we add one if have
	// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
	// the corresponding revoke_and_ack back yet.
	let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
	if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
		self.mark_awaiting_response();
	let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
	// Re-send channel_ready if the peer hasn't seen any commitment past the first.
	let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
		// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		Some(msgs::ChannelReady {
			channel_id: self.context.channel_id(),
			next_per_commitment_point,
			short_channel_id_alias: Some(self.context.outbound_scid_alias),
	// Peer is fully up to date on our commitment updates (possibly minus an RAA).
	if msg.next_local_commitment_number == next_counterparty_commitment_number {
		if required_revoke.is_some() {
			log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
		Ok(ReestablishResponses {
			channel_ready, shutdown_msg, announcement_sigs,
			raa: required_revoke,
			commitment_update: None,
			order: self.context.resend_order.clone(),
	// Peer missed our last commitment_signed; regenerate and re-send it (unless a monitor
	// update is in flight, in which case queue it).
	} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
		if required_revoke.is_some() {
			log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
		if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
			self.context.monitor_pending_commitment_signed = true;
			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				commitment_update: None, raa: None,
				order: self.context.resend_order.clone(),
			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
				order: self.context.resend_order.clone(),
		Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
/// at which point they will be recalculated.
///
/// Results are memoized in `closing_fee_limits` so repeated calls during one negotiation
/// round return the same range.
fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
	where F::Target: FeeEstimator
	// Return the cached range if we've already computed it this negotiation.
	if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
	// Propose a range from our current Background feerate to our Normal feerate plus our
	// force_close_avoidance_max_fee_satoshis.
	// If we fail to come to consensus, we'll have to force-close.
	let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
	// Use NonAnchorChannelFee because this should be an estimate for a channel close
	// that we don't expect to need fee bumping
	let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
	// If we're the non-funder the counterparty pays the fee, so we place no upper bound.
	let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
	// The spec requires that (when the channel does not have anchors) we only send absolute
	// channel fees no greater than the absolute channel fee on the current commitment
	// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
	// very good reason to apply such a limit in any case. We don't bother doing so, risking
	// some force-closure by old nodes, but we wanted to close the channel anyway.
	if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
		let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
		proposed_feerate = cmp::max(proposed_feerate, min_feerate);
		proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
	// Note that technically we could end up with a lower minimum fee if one sides' balance is
	// below our dust limit, causing the output to disappear. We don't bother handling this
	// case, however, as this should only happen if a channel is closed before any (material)
	// payments have been made on it. This may cause slight fee overpayment and/or failure to
	// come to consensus with our counterparty on appropriate fees, however it should be a
	// relatively rare case. We can revisit this later, though note that in order to determine
	// if the funders' output is dust we have to know the absolute fee we're going to use.
	let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
	let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
	let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
			// We always add force_close_avoidance_max_fee_satoshis to our normal
			// feerate-calculated fee, but allow the max to be overridden if we're using a
			// target feerate-calculated fee.
			cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
				proposed_max_feerate as u64 * tx_weight / 1000)
			// As the non-funder, cap the fee at the counterparty's full balance (their
			// channel value minus our balance, rounding our msat balance up).
			self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
	// Cache and return the computed (min, max) range.
	self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
	self.context.closing_fee_limits.clone().unwrap()
/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
/// this point if we're the funder we should send the initial closing_signed, and in any case
/// shutdown should complete within a reasonable timeframe.
fn closing_negotiation_ready(&self) -> bool {
	// Thin wrapper: the actual readiness checks live on ChannelContext.
	self.context.closing_negotiation_ready()
4253 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4254 /// an Err if no progress is being made and the channel should be force-closed instead.
4255 /// Should be called on a one-minute timer.
4256 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4257 if self.closing_negotiation_ready() {
4258 if self.context.closing_signed_in_flight {
4259 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4261 self.context.closing_signed_in_flight = true;
/// Possibly kicks off (or resumes) the closing_signed negotiation.
///
/// If we're the funder and negotiation is ready, signs and returns our initial
/// `closing_signed` proposal at our minimum acceptable fee. If we're not the funder, replays
/// any `closing_signed` the counterparty sent while a monitor update was pending. Returns
/// `(None, None, None)` when there is nothing to do yet.
pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
	&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	// Already proposed a fee, or not yet ready to negotiate: nothing to do.
	if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
		return Ok((None, None, None));
	// The funder sends the first closing_signed; as the non-funder we only process a
	// counterparty proposal that was deferred while a monitor update was in progress.
	if !self.context.is_outbound() {
		if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
			return self.closing_signed(fee_estimator, &msg);
		return Ok((None, None, None));
	let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
	assert!(self.context.shutdown_scriptpubkey.is_some());
	// Open the negotiation at our minimum fee; the fee range below tells the peer how far
	// we're willing to move.
	let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
	log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
		our_min_fee, our_max_fee, total_fee_satoshis);
	match &self.context.holder_signer {
		ChannelSignerType::Ecdsa(ecdsa) => {
				.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
				.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
			// Remember what we proposed so closing_signed() can detect acceptance.
			self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
			Ok((Some(msgs::ClosingSigned {
				channel_id: self.context.channel_id,
				fee_satoshis: total_fee_satoshis,
				fee_range: Some(msgs::ClosingSignedFeeRange {
					min_fee_satoshis: our_min_fee,
					max_fee_satoshis: our_max_fee,
// Marks a channel as waiting for a response from the counterparty. If it's not received
// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
// to disconnect the peer (see `should_disconnect_peer_awaiting_response`).
fn mark_awaiting_response(&mut self) {
	// Some(0): start the tick counter; each timer tick increments it.
	self.context.sent_message_awaiting_response = Some(0);
4317 /// Determines whether we should disconnect the counterparty due to not receiving a response
4318 /// within our expected timeframe.
4320 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4321 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4322 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4325 // Don't disconnect when we're not waiting on a response.
4328 *ticks_elapsed += 1;
4329 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	// Handles an inbound `shutdown` message: validates the peer's closing script, records it,
	// possibly generates our own `shutdown` (and a monitor update persisting our closing
	// script), and drops any holding-cell HTLCs back to the caller for failure.
	&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
		return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
	if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
		// Spec says we should fail the connection, not the channel, but that's nonsense, there
		// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
		// can do that via error message without getting a connection fail anyway...
		return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
	// Shutdown is invalid while the peer has uncommitted inbound HTLCs outstanding.
	for htlc in self.context.pending_inbound_htlcs.iter() {
		if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
			return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
	assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
	// Warn (not close) on a non-BOLT2-compliant closing script.
	if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
		return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
	// A re-sent shutdown must carry the same script as before.
	if self.context.counterparty_shutdown_scriptpubkey.is_some() {
		if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
			return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
		self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
	// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
	// immediately after the commitment dance, but we can send a Shutdown because we won't send
	// any further commitment updates after we set LocalShutdownSent.
	let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
	// If we haven't committed to a closing script yet, fetch one from the signer now; the
	// monitor must then be told about it (update_shutdown_script below).
	let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			assert!(send_shutdown);
			let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => scriptpubkey,
				Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
			if !shutdown_scriptpubkey.is_compatible(their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
	// From here on out, we may not fail!
	self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
	self.context.update_time_counter += 1;
	// Persist our closing script into the ChannelMonitor so it can be enforced even if we
	// restart before the cooperative close completes.
	let monitor_update = if update_shutdown_script {
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
				scriptpubkey: self.get_closing_scriptpubkey(),
		self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
		self.push_ret_blockable_mon_update(monitor_update)
	let shutdown = if send_shutdown {
		Some(msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
	// We can't send our shutdown until we've committed all of our pending HTLCs, but the
	// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
	// cell HTLCs and return them to fail the payment.
	self.context.holding_cell_update_fee = None;
	let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
	self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
				dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
	self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
	self.context.update_time_counter += 1;
	Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4429 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4430 let mut tx = closing_tx.trust().built_transaction().clone();
4432 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4434 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4435 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4436 let mut holder_sig = sig.serialize_der().to_vec();
4437 holder_sig.push(EcdsaSighashType::All as u8);
4438 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4439 cp_sig.push(EcdsaSighashType::All as u8);
4440 if funding_key[..] < counterparty_funding_key[..] {
4441 tx.input[0].witness.push(holder_sig);
4442 tx.input[0].witness.push(cp_sig);
4444 tx.input[0].witness.push(cp_sig);
4445 tx.input[0].witness.push(holder_sig);
4448 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4452 pub fn closing_signed<F: Deref>(
4453 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4454 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4455 where F::Target: FeeEstimator
4457 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4458 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4460 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4461 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4463 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4464 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4466 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4467 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4470 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4471 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4474 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4475 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4476 return Ok((None, None, None));
4479 let funding_redeemscript = self.context.get_funding_redeemscript();
4480 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4481 if used_total_fee != msg.fee_satoshis {
4482 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4484 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4486 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4489 // The remote end may have decided to revoke their output due to inconsistent dust
4490 // limits, so check for that case by re-checking the signature here.
4491 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4492 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4493 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4497 for outp in closing_tx.trust().built_transaction().output.iter() {
4498 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4499 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4503 assert!(self.context.shutdown_scriptpubkey.is_some());
4504 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4505 if last_fee == msg.fee_satoshis {
4506 let shutdown_result = ShutdownResult {
4507 monitor_update: None,
4508 dropped_outbound_htlcs: Vec::new(),
4509 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4511 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4512 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4513 self.context.update_time_counter += 1;
4514 return Ok((None, Some(tx), Some(shutdown_result)));
4518 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4520 macro_rules! propose_fee {
4521 ($new_fee: expr) => {
4522 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4523 (closing_tx, $new_fee)
4525 self.build_closing_transaction($new_fee, false)
4528 return match &self.context.holder_signer {
4529 ChannelSignerType::Ecdsa(ecdsa) => {
4531 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4532 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4533 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4534 let shutdown_result = ShutdownResult {
4535 monitor_update: None,
4536 dropped_outbound_htlcs: Vec::new(),
4537 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4539 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4540 self.context.update_time_counter += 1;
4541 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4542 (Some(tx), Some(shutdown_result))
4547 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4548 Ok((Some(msgs::ClosingSigned {
4549 channel_id: self.context.channel_id,
4550 fee_satoshis: used_fee,
4552 fee_range: Some(msgs::ClosingSignedFeeRange {
4553 min_fee_satoshis: our_min_fee,
4554 max_fee_satoshis: our_max_fee,
4556 }), signed_tx, shutdown_result))
4562 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4563 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4564 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4566 if max_fee_satoshis < our_min_fee {
4567 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4569 if min_fee_satoshis > our_max_fee {
4570 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4573 if !self.context.is_outbound() {
4574 // They have to pay, so pick the highest fee in the overlapping range.
4575 // We should never set an upper bound aside from their full balance
4576 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4577 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4579 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4580 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4581 msg.fee_satoshis, our_min_fee, our_max_fee)));
4583 // The proposed fee is in our acceptable range, accept it and broadcast!
4584 propose_fee!(msg.fee_satoshis);
4587 // Old fee style negotiation. We don't bother to enforce whether they are complying
4588 // with the "making progress" requirements, we just comply and hope for the best.
4589 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4590 if msg.fee_satoshis > last_fee {
4591 if msg.fee_satoshis < our_max_fee {
4592 propose_fee!(msg.fee_satoshis);
4593 } else if last_fee < our_max_fee {
4594 propose_fee!(our_max_fee);
4596 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4599 if msg.fee_satoshis > our_min_fee {
4600 propose_fee!(msg.fee_satoshis);
4601 } else if last_fee > our_min_fee {
4602 propose_fee!(our_min_fee);
4604 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4608 if msg.fee_satoshis < our_min_fee {
4609 propose_fee!(our_min_fee);
4610 } else if msg.fee_satoshis > our_max_fee {
4611 propose_fee!(our_max_fee);
4613 propose_fee!(msg.fee_satoshis);
// Validates a to-be-forwarded HTLC against one specific `ChannelConfig`: the inbound
// amount must cover our base + proportional forwarding fee on `amt_to_forward`, and the
// inbound `cltv_expiry` must exceed the outgoing CLTV by at least `cltv_expiry_delta`.
// On failure, returns a static error string plus the BOLT 4 failure code to relay back.
4619 fn internal_htlc_satisfies_config(
4620 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4621 ) -> Result<(), (&'static str, u16)> {
// Checked arithmetic: a u64 overflow in the fee computation yields `None`, which is
// treated below as a fee failure rather than silently wrapping.
4622 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4623 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4624 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4625 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4627 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4628 0x1000 | 12, // fee_insufficient
// Widen to u64 before adding so `outgoing_cltv_value + cltv_expiry_delta` cannot
// overflow in u32 arithmetic.
4631 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4633 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4634 0x1000 | 13, // incorrect_cltv_expiry
4640 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4641 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4642 /// unsuccessful, falls back to the previous one if one exists.
4643 pub fn htlc_satisfies_config(
4644 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4645 ) -> Result<(), (&'static str, u16)> {
4646 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
// Also accept HTLCs valid under the previous config — presumably because a recent
// config change may not yet have propagated to senders via gossip.
// NOTE(review): the retention window for `prev_config` is defined elsewhere; confirm there.
4648 if let Some(prev_config) = self.context.prev_config() {
4649 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
// Commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER; the stored
// field is the next number to use, so the "current" holder commitment is one above it.
4656 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4657 self.context.cur_holder_commitment_transaction_number + 1
// Current counterparty commitment number. While AwaitingRemoteRevoke is set the stored
// counter has already advanced for the commitment we just sent but they have not yet
// revoked, so that pending step is subtracted back out.
4660 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4661 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
// Most recently *revoked* counterparty commitment number: two above the stored
// next-to-sign value (one step for "current", one more for its revoked predecessor).
4664 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4665 self.context.cur_counterparty_commitment_transaction_number + 2
// Borrows the holder's channel signer (whatever backing `ChannelSignerType` variant is
// in use) without exposing the rest of the context.
4669 pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
4670 &self.context.holder_signer
// Builds a snapshot of the channel's value distribution for inspection. All fields are in
// msat; several underlying values are stored in sats and scaled by 1000 here.
4674 pub fn get_value_stat(&self) -> ChannelValueStat {
4676 value_to_self_msat: self.context.value_to_self_msat,
4677 channel_value_msat: self.context.channel_value_satoshis * 1000,
// `unwrap()`: the counterparty's selected reserve is expected to be set by the time
// this is called. NOTE(review): panics pre-negotiation — confirm callers only use
// this on channels past the open handshake.
4678 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4679 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4680 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum only AddHTLC entries in the holding cell; other queued update kinds carry no
// new outbound value.
4681 holding_cell_outbound_amount_msat: {
4683 for h in self.context.holding_cell_htlc_updates.iter() {
4685 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4693 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4694 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4698 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4699 /// Allowed in any state (including after shutdown)
4700 pub fn is_awaiting_monitor_update(&self) -> bool {
// Pure flag test on `channel_state`; no state is mutated.
4701 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4704 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4705 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4706 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
// With blocked updates queued, everything before the first queued update's ID has been
// released, so the latest unblocked ID is one less than the head of the queue.
4707 self.context.blocked_monitor_updates[0].update.update_id - 1
4710 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4711 /// further blocked monitor update exists after the next.
4712 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4713 if self.context.blocked_monitor_updates.is_empty() { return None; }
// Pop from the front so queued updates are released in the order they were blocked.
4714 Some((self.context.blocked_monitor_updates.remove(0).update,
4715 !self.context.blocked_monitor_updates.is_empty()))
4718 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4719 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4720 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4721 -> Option<ChannelMonitorUpdate> {
// An update may only be released immediately when nothing is already queued; otherwise
// it must queue behind the existing blocked updates to preserve ordering.
4722 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4723 if !release_monitor {
4724 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
// Number of monitor updates currently held as blocked (not yet released to the user).
4733 pub fn blocked_monitor_updates_pending(&self) -> usize {
4734 self.context.blocked_monitor_updates.len()
4737 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4738 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4739 /// transaction. If the channel is inbound, this implies simply that the channel has not
4741 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4742 if !self.is_awaiting_monitor_update() { return false; }
// Mask off flags that may legitimately accompany the funding state, then check whether
// only FundingSent remains — the non-0conf "waiting on initial persist" shape.
4743 if self.context.channel_state &
4744 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4745 == ChannelState::FundingSent as u32 {
4746 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4747 // FundingSent set, though our peer could have sent their channel_ready.
4748 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4751 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4752 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4753 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4754 // waiting for the initial monitor persistence. Thus, we check if our commitment
4755 // transaction numbers have both been iterated only exactly once (for the
4756 // funding_signed), and we're awaiting monitor update.
4758 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4759 // only way to get an awaiting-monitor-update state during initial funding is if the
4760 // initial monitor persistence is still pending).
4762 // Because deciding we're awaiting initial broadcast spuriously could result in
4763 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4764 // we hard-assert here, even in production builds.
4765 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4766 assert!(self.context.monitor_pending_channel_ready);
4767 assert_eq!(self.context.latest_monitor_update_id, 0);
4773 /// Returns true if our channel_ready has been sent
4774 pub fn is_our_channel_ready(&self) -> bool {
// True either via the explicit OurChannelReady flag, or once the base state (with
// STATE_FLAGS masked off) has advanced to ChannelReady or beyond.
4775 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4778 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4779 pub fn received_shutdown(&self) -> bool {
// Flag set when the counterparty's `shutdown` message has been received.
4780 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4783 /// Returns true if we either initiated or agreed to shut down the channel.
4784 pub fn sent_shutdown(&self) -> bool {
// Flag set once our own `shutdown` message has been sent.
4785 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4788 /// Returns true if this channel is fully shut down. True here implies that no further actions
4789 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4790 /// will be handled appropriately by the chain monitor.
4791 pub fn is_shutdown(&self) -> bool {
4792 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
// ShutdownComplete is expected to be terminal and exclusive: no other state flags set.
4793 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
// Read accessor for the channel's current `ChannelUpdateStatus`.
4798 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4799 self.context.channel_update_status
// Updates the channel's `ChannelUpdateStatus`, bumping `update_time_counter` first —
// presumably so any subsequently-built channel_update carries a fresher timestamp than
// previously-broadcast ones. NOTE(review): confirm the counter's use as a timestamp.
4802 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4803 self.context.update_time_counter += 1;
4804 self.context.channel_update_status = status;
// Checks whether we are now able (and required) to send `channel_ready`, performing the
// corresponding state transition and returning the message to send if so. Called:
4807 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
4809 // * always when a new block/transactions are confirmed with the new height
4810 // * when funding is signed with a height of 0
4811 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
// +1 because the block at the confirmation height itself counts as one confirmation.
4815 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4816 if funding_tx_confirmations <= 0 {
// Reorged below the confirmation point — reset so a later reconfirmation restarts tracking.
4817 self.context.funding_tx_confirmation_height = 0;
4820 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
4824 // If we're still pending the signature on a funding transaction, then we're not ready to send a
4825 // channel_ready yet.
4826 if self.context.signer_pending_funding {
4830 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
4831 // channel_ready until the entire batch is ready.
4832 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// State machine: FundingSent -> (+OurChannelReady), or FundingSent|TheirChannelReady ->
// ChannelReady; both require a commitment update (the message below).
4833 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
4834 self.context.channel_state |= ChannelState::OurChannelReady as u32;
4836 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
4837 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
4838 self.context.update_time_counter += 1;
4840 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
4841 // We got a reorg but not enough to trigger a force close, just ignore.
4844 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
4845 // We should never see a funding transaction on-chain until we've received
4846 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
4847 // an inbound channel - before that we have no known funding TXID). The fuzzer,
4848 // however, may do this and we shouldn't treat it as a bug.
4849 #[cfg(not(fuzzing))]
4850 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
4851 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
4852 self.context.channel_state);
4854 // We got a reorg but not enough to trigger a force close, just ignore.
4858 if need_commitment_update {
// Only actually emit the message when no monitor update is in flight and the peer is
// connected; otherwise defer via monitor_pending_channel_ready.
4859 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
4860 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4861 let next_per_commitment_point =
4862 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
4863 return Some(msgs::ChannelReady {
4864 channel_id: self.context.channel_id,
4865 next_per_commitment_point,
4866 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4870 self.context.monitor_pending_channel_ready = true;
4876 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
4877 /// In the first case, we store the confirmation height and calculating the short channel id.
4878 /// In the second, we simply return an Err indicating we need to be force-closed now.
4879 pub fn transactions_confirmed<NS: Deref, L: Deref>(
4880 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
4881 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
4882 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4884 NS::Target: NodeSigner,
4887 let mut msgs = (None, None);
4888 if let Some(funding_txo) = self.context.get_funding_txo() {
4889 for &(index_in_block, tx) in txdata.iter() {
4890 // Check if the transaction is the expected funding transaction, and if it is,
4891 // check that it pays the right amount to the right script.
4892 if self.context.funding_tx_confirmation_height == 0 {
4893 if tx.txid() == funding_txo.txid {
4894 let txo_idx = funding_txo.index as usize;
// The funding output must exist, be the expected P2WSH of our 2-of-2 redeemscript,
// and carry exactly the negotiated channel value.
4895 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
4896 tx.output[txo_idx].value != self.context.channel_value_satoshis {
4897 if self.context.is_outbound() {
4898 // If we generated the funding transaction and it doesn't match what it
4899 // should, the client is really broken and we should just panic and
4900 // tell them off. That said, because hash collisions happen with high
4901 // probability in fuzzing mode, if we're fuzzing we just close the
4902 // channel and move on.
4903 #[cfg(not(fuzzing))]
4904 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
4906 self.context.update_time_counter += 1;
4907 let err_reason = "funding tx had wrong script/value or output index";
4908 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
4910 if self.context.is_outbound() {
// Coinbase inputs are exempt from the malleability check below — presumably
// because they legitimately carry no witness. Any non-witness input on a
// funding tx we built would make the txid malleable.
4911 if !tx.is_coin_base() {
4912 for input in tx.input.iter() {
4913 if input.witness.is_empty() {
4914 // We generated a malleable funding transaction, implying we've
4915 // just exposed ourselves to funds loss to our counterparty.
4916 #[cfg(not(fuzzing))]
4917 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
4922 self.context.funding_tx_confirmation_height = height;
4923 self.context.funding_tx_confirmed_in = Some(*block_hash);
4924 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
4925 Ok(scid) => Some(scid),
4926 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
4929 // If this is a coinbase transaction and not a 0-conf channel
4930 // we should update our min_depth to 100 to handle coinbase maturity
4931 if tx.is_coin_base() &&
4932 self.context.minimum_depth.unwrap_or(0) > 0 &&
4933 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
4934 self.context.minimum_depth = Some(COINBASE_MATURITY);
4937 // If we allow 1-conf funding, we may need to check for channel_ready here and
4938 // send it immediately instead of waiting for a best_block_updated call (which
4939 // may have already happened for this block).
4940 if let Some(channel_ready) = self.check_get_channel_ready(height) {
4941 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
4942 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
4943 msgs = (Some(channel_ready), announcement_sigs);
// Second case from the doc comment: any confirmed spend of the funding outpoint means
// the channel is closed on-chain.
4946 for inp in tx.input.iter() {
4947 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
4948 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
4949 return Err(ClosureReason::CommitmentTxConfirmed);
4957 /// When a new block is connected, we check the height of the block against outbound holding
4958 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
4959 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
4960 /// handled by the ChannelMonitor.
4962 /// If we return Err, the channel may have been closed, at which point the standard
4963 /// requirements apply - no calls may be made except those explicitly stated to be allowed
4966 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
4968 pub fn best_block_updated<NS: Deref, L: Deref>(
4969 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
4970 node_signer: &NS, user_config: &UserConfig, logger: &L
4971 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4973 NS::Target: NodeSigner,
// Thin wrapper: supplies the signer/config so do_best_block_updated may also generate
// channel_ready / announcement_signatures messages.
4976 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
// Shared implementation for best_block_updated and funding_transaction_unconfirmed.
// When `chain_node_signer` is None (the simulated-reorg path), no channel_ready or
// announcement_signatures messages are generated.
4979 fn do_best_block_updated<NS: Deref, L: Deref>(
4980 &mut self, height: u32, highest_header_time: u32,
4981 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
4982 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4984 NS::Target: NodeSigner,
4987 let mut timed_out_htlcs = Vec::new();
4988 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
4989 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
4991 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Drop near-expiry AddHTLCs still sitting in the holding cell, collecting them so the
// caller can fail them backwards.
4992 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4994 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
4995 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
4996 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Monotonically advance the update-time counter using the best header timestamp seen.
5004 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5006 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5007 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5008 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5010 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5011 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5014 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5015 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5016 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5017 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5018 if self.context.funding_tx_confirmation_height == 0 {
5019 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5020 // zero if it has been reorged out, however in either case, our state flags
5021 // indicate we've already sent a channel_ready
5022 funding_tx_confirmations = 0;
5025 // If we've sent channel_ready (or have both sent and received channel_ready), and
5026 // the funding transaction has become unconfirmed,
5027 // close the channel and hope we can get the latest state on chain (because presumably
5028 // the funding transaction is at least still in the mempool of most nodes).
5030 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5031 // 0-conf channel, but not doing so may lead to the
5032 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5034 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5035 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5036 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5037 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound channel whose funding never confirmed within the deadline: give up.
5039 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5040 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5041 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5042 // If funding_tx_confirmed_in is unset, the channel must not be active
5043 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5044 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5045 return Err(ClosureReason::FundingTimedOut);
5048 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5049 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5051 Ok((None, timed_out_htlcs, announcement_sigs))
5054 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5055 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5056 /// before the channel has reached channel_ready and we can just wait for more blocks.
5057 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5058 if self.context.funding_tx_confirmation_height != 0 {
5059 // We handle the funding disconnection by calling best_block_updated with a height one
5060 // below where our funding was connected, implying a reorg back to conf_height - 1.
5061 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5062 // We use the time field to bump the current time we set on channel updates if its
5063 // larger. If we don't know that time has moved forward, we can just set it to the last
5064 // time we saw and it will be ignored.
5065 let best_time = self.context.update_time_counter;
// Passing `None` for chain_node_signer disables channel_ready/announcement_sigs
// generation in do_best_block_updated — the asserts below rely on that.
5066 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5067 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5068 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5069 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5070 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5076 // We never learned about the funding confirmation anyway, just ignore
5081 // Methods to get unprompted messages to send to the remote end (or where we already returned
5082 // something in the handler for the message that prompted this message):
5084 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5085 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5086 /// directions). Should be used for both broadcasted announcements and in response to an
5087 /// AnnouncementSignatures message from the remote peer.
5089 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5092 /// This will only return ChannelError::Ignore upon failure.
5094 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5095 fn get_channel_announcement<NS: Deref>(
5096 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5097 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5098 if !self.context.config.announced_channel {
5099 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5101 if !self.context.is_usable() {
5102 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5105 let short_channel_id = self.context.get_short_channel_id()
5106 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5107 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5108 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5109 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// node_id_1/node_id_2 are ordered by ascending serialized pubkey, and the bitcoin
// funding keys follow the same node ordering.
5110 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5112 let msg = msgs::UnsignedChannelAnnouncement {
5113 features: channelmanager::provided_channel_features(&user_config),
5116 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5117 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5118 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5119 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5120 excess_data: Vec::new(),
// Builds our announcement_signatures message for this channel if all preconditions hold
// (enough confirmations, channel usable, peer connected, not already sent); returns None
// otherwise. Marks the sigs state as MessageSent on success.
5126 fn get_announcement_sigs<NS: Deref, L: Deref>(
5127 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5128 best_block_height: u32, logger: &L
5129 ) -> Option<msgs::AnnouncementSignatures>
5131 NS::Target: NodeSigner,
// `+ 5 > best_block_height` requires six confirmations (the confirmation block itself
// counts as one) before announcing — see the matching "six confirmations" error text in
// announcement_signatures handling.
5134 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5138 if !self.context.is_usable() {
5142 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5143 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Avoid re-sending once signatures have already gone out.
5147 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5151 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5152 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5155 log_trace!(logger, "{:?}", e);
5159 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5161 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5166 match &self.context.holder_signer {
5167 ChannelSignerType::Ecdsa(ecdsa) => {
5168 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5170 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5175 let short_channel_id = match self.context.get_short_channel_id() {
5177 None => return None,
5180 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5182 Some(msgs::AnnouncementSignatures {
5183 channel_id: self.context.channel_id(),
5185 node_signature: our_node_sig,
5186 bitcoin_signature: our_bitcoin_sig,
5192 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5194 fn sign_channel_announcement<NS: Deref>(
5195 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
// Combines our freshly-produced node/bitcoin signatures with the counterparty's
// previously-stored ones into a complete channel_announcement.
5196 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5197 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5198 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5199 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
// Signature slot order must follow the node_id_1/node_id_2 ordering already baked
// into `announcement`.
5200 let were_node_one = announcement.node_id_1 == our_node_key;
5202 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5203 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5204 match &self.context.holder_signer {
5205 ChannelSignerType::Ecdsa(ecdsa) => {
5206 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5207 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5208 Ok(msgs::ChannelAnnouncement {
5209 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5210 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5211 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5212 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5213 contents: announcement,
5218 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5222 /// Processes an incoming announcement_signatures message, providing a fully-signed
5223 /// channel_announcement message which we can broadcast and storing our counterparty's
5224 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5225 pub fn announcement_signatures<NS: Deref>(
5226 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5227 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5228 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5229 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// The signed message hash is the double-SHA256 of the encoded unsigned announcement.
5231 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify both of the peer's signatures — the node signature against their node id and
// the bitcoin signature against their funding pubkey — before trusting either.
5233 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5234 return Err(ChannelError::Close(format!(
5235 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5236 &announcement, self.context.get_counterparty_node_id())));
5238 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5239 return Err(ChannelError::Close(format!(
5240 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5241 &announcement, self.context.counterparty_funding_pubkey())));
// Note the sigs are stored *before* the confirmation-depth check below, so an early
// (pre-six-conf) message is kept and can still be used to announce later.
5244 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5245 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5246 return Err(ChannelError::Ignore(
5247 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5250 self.sign_channel_announcement(node_signer, announcement)
5253 /// Gets a signed channel_announcement for this channel, if we previously received an
5254 /// announcement_signatures from our counterparty.
5255 pub fn get_signed_channel_announcement<NS: Deref>(
5256 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5257 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Don't announce until the funding tx has at least six confirmations
// (`height + 5 > best_block_height` means fewer than six).
5258 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5261 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
// This accessor is best-effort: any failure building or signing simply yields None.
5263 Err(_) => return None,
5265 match self.sign_channel_announcement(node_signer, announcement) {
5266 Ok(res) => Some(res),
5271 /// May panic if called on a channel that wasn't immediately-previously
5272 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5273 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
// We must currently be in the PeerDisconnected state (set by the pause above), and the
// channel must have advanced past its very first commitment.
5274 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5275 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5276 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5277 // current to_remote balances. However, it no longer has any use, and thus is now simply
5278 // set to a dummy (but valid, as required by the spec) public key.
5279 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5280 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5281 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5282 let mut pk = [2; 33]; pk[1] = 0xff;
5283 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, include the last
// per-commitment secret they revealed (data_loss_protect); otherwise (elided else
// branch) we presumably fall back to an all-zero secret - TODO confirm from full file.
5284 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5285 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5286 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5289 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
// Start the reconnection timeout clock: we now expect a reply from the peer.
5292 self.mark_awaiting_response();
5293 msgs::ChannelReestablish {
5294 channel_id: self.context.channel_id(),
5295 // The protocol has two different commitment number concepts - the "commitment
5296 // transaction number", which starts from 0 and counts up, and the "revocation key
5297 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5298 // commitment transaction numbers by the index which will be used to reveal the
5299 // revocation key for that commitment transaction, which means we have to convert them
5300 // to protocol-level commitment numbers here...
5302 // next_local_commitment_number is the next commitment_signed number we expect to
5303 // receive (indicating if they need to resend one that we missed).
5304 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5305 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5306 // receive, however we track it by the next commitment number for a remote transaction
5307 // (which is one further, as they always revoke previous commitment transaction, not
5308 // the one we send) so we have to decrement by 1. Note that if
5309 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5310 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5312 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5313 your_last_per_commitment_secret: remote_last_secret,
5314 my_current_per_commitment_point: dummy_pubkey,
5315 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5316 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5317 // txid of that interactive transaction, else we MUST NOT set it.
5318 next_funding_txid: None,
5323 // Send stuff to our remote peers:
5325 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5326 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5327 /// commitment update.
5329 /// `Err`s will only be [`ChannelError::Ignore`].
5330 pub fn queue_add_htlc<F: Deref, L: Deref>(
5331 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5332 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5333 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5334 ) -> Result<(), ChannelError>
5335 where F::Target: FeeEstimator, L::Target: Logger
// Delegate to send_htlc with force_holding_cell = true, so no update_add_htlc message is
// generated - the assert below checks that invariant.
5338 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5339 skimmed_fee_msat, fee_estimator, logger)
5340 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// Per the doc comment, queueing can only fail with Ignore; anything else is a bug.
5342 if let ChannelError::Ignore(_) = err { /* fine */ }
5343 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5348 /// Adds a pending outbound HTLC to this channel, note that you probably want
5349 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5351 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5353 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5354 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5356 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5357 /// we may not yet have sent the previous commitment update messages and will need to
5358 /// regenerate them.
5360 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5361 /// on this [`Channel`] if `force_holding_cell` is false.
5363 /// `Err`s will only be [`ChannelError::Ignore`].
5364 fn send_htlc<F: Deref, L: Deref>(
5365 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5366 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5367 skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5368 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5369 where F::Target: FeeEstimator, L::Target: Logger
// The channel must be fully open (ChannelReady) with neither side having begun shutdown.
5371 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5372 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
// Sanity bounds: the HTLC cannot exceed the channel's total value and cannot be zero.
5374 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5375 if amount_msat > channel_total_msat {
5376 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5379 if amount_msat == 0 {
5380 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Check against the currently-available outbound min/max, which account for fees,
// reserves and in-flight HTLCs (computed from the current feerate estimate).
5383 let available_balances = self.context.get_available_balances(fee_estimator);
5384 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5385 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5386 available_balances.next_outbound_htlc_minimum_msat)));
5389 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5390 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5391 available_balances.next_outbound_htlc_limit_msat)));
5394 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5395 // Note that this should never really happen, if we're !is_live() on receipt of an
5396 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5397 // the user to send directly into a !is_live() channel. However, if we
5398 // disconnected during the time the previous hop was doing the commitment dance we may
5399 // end up getting here after the forwarding delay. In any case, returning an
5400 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5401 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// While awaiting the peer's revoke_and_ack or a monitor persist, we must defer the HTLC
// to the holding cell rather than sending it immediately (see the doc comment above).
5404 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5405 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5406 payment_hash, amount_msat,
5407 if force_holding_cell { "into holding cell" }
5408 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5409 else { "to peer" });
5411 if need_holding_cell {
5412 force_holding_cell = true;
5415 // Now update local state:
5416 if force_holding_cell {
// Holding-cell path: stash the HTLC; no update_add_htlc is generated (returns None in
// the elided tail of this branch).
5417 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5422 onion_routing_packet,
// Direct path: record the HTLC as locally announced and build the update_add_htlc
// message to hand to the peer.
5428 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5429 htlc_id: self.context.next_holder_htlc_id,
5431 payment_hash: payment_hash.clone(),
5433 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5438 let res = msgs::UpdateAddHTLC {
5439 channel_id: self.context.channel_id,
5440 htlc_id: self.context.next_holder_htlc_id,
5444 onion_routing_packet,
// Only the direct path consumes an htlc_id; holding-cell HTLCs get theirs when freed.
5447 self.context.next_holder_htlc_id += 1;
/// Builds the next counterparty commitment transaction, advances local HTLC/fee-update
/// state accordingly, and returns the [`ChannelMonitorUpdate`] that records the new
/// commitment info. Infallible; the caller is responsible for actually signing and
/// sending the corresponding commitment_signed.
5452 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5453 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5454 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5455 // fail to generate this, we still are at least at a position where upgrading their status
// Inbound HTLCs: AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke.
5457 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5458 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5459 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5461 if let Some(state) = new_state {
5462 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
// Outbound HTLCs: AwaitingRemoteRevokeToRemove -> AwaitingRemovedRemoteRevoke.
5466 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5467 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5468 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5469 // Grab the preimage, if it exists, instead of cloning
5470 let mut reason = OutboundHTLCOutcome::Success(None);
5471 mem::swap(outcome, &mut reason);
5472 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// An inbound fee update awaiting this commitment becomes committed now; only the
// non-outbound side can be in this state (the funder sends update_fee, we receive it).
5475 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5476 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5477 debug_assert!(!self.context.is_outbound());
5478 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5479 self.context.feerate_per_kw = feerate;
5480 self.context.pending_update_fee = None;
// On reconnect we must send our revoke_and_ack before re-sending this commitment.
5483 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5485 let (mut htlcs_ref, counterparty_commitment_tx) =
5486 self.build_commitment_no_state_update(logger);
5487 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
// Box the HTLC sources so the monitor update owns its data independently of `self`.
5488 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5489 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5491 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5492 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
// Record the new counterparty commitment in the channel monitor.
5495 self.context.latest_monitor_update_id += 1;
5496 let monitor_update = ChannelMonitorUpdate {
5497 update_id: self.context.latest_monitor_update_id,
5498 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5499 commitment_txid: counterparty_commitment_txid,
5500 htlc_outputs: htlcs.clone(),
5501 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5502 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5503 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5504 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5505 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
// We now await the peer's revoke_and_ack for this commitment.
5508 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
/// Builds the counterparty's next commitment transaction without mutating any channel
/// state, returning the included HTLCs (with their sources, if outbound) and the
/// transaction itself.
5512 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5513 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5514 where L::Target: Logger
5516 let counterparty_keys = self.context.build_remote_transaction_keys();
5517 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5518 let counterparty_commitment_tx = commitment_stats.tx;
// In test/fuzz builds, cross-check the fee we actually computed against the fee that was
// projected when we last evaluated whether a new HTLC was affordable.
5520 #[cfg(any(test, fuzzing))]
5522 if !self.context.is_outbound() {
5523 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5524 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5525 if let Some(info) = projected_commit_tx_info {
5526 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
// Only compare when the channel is in the same state the projection was made for.
5527 if info.total_pending_htlcs == total_pending_htlcs
5528 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5529 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5530 && info.feerate == self.context.feerate_per_kw {
5531 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5532 assert_eq!(actual_fee, info.fee);
5538 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5541 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5542 /// generation when we shouldn't change HTLC/channel state.
5543 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5544 // Get the fee tests from `build_commitment_no_state_update`
5545 #[cfg(any(test, fuzzing))]
5546 self.build_commitment_no_state_update(logger);
// Rebuild the counterparty's current commitment transaction from scratch - no state is
// mutated here, so this is safe to call repeatedly (e.g. on reestablish).
5548 let counterparty_keys = self.context.build_remote_transaction_keys();
5549 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5550 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5552 match &self.context.holder_signer {
5553 ChannelSignerType::Ecdsa(ecdsa) => {
5554 let (signature, htlc_signatures);
5557 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5558 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Sign the commitment tx and all its HTLC transactions in a single signer call.
5562 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5563 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5565 htlc_signatures = res.1;
5567 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5568 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5569 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5570 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// Trace each HTLC signature alongside the transaction it signs; the signatures are
// returned in the same order the HTLCs were collected above.
5572 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5573 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5574 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5575 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5576 log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
5577 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5581 Ok((msgs::CommitmentSigned {
5582 channel_id: self.context.channel_id,
5586 partial_signature_with_nonce: None,
5587 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5592 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5593 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5595 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5596 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5597 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5598 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5599 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5600 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5601 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5602 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell = false: send immediately if the channel state allows it.
5604 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5605 onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
// As with queue_add_htlc, only Ignore errors are expected from send_htlc.
5606 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// Advance commitment state and pause the monitor until the update is persisted.
5609 let monitor_update = self.build_commitment_no_status_check(logger);
5610 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5611 Ok(self.push_ret_blockable_mon_update(monitor_update))
5617 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5619 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
// Extract only the forwarding-relevant fields from the counterparty's channel_update.
5620 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5621 fee_base_msat: msg.contents.fee_base_msat,
5622 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5623 cltv_expiry_delta: msg.contents.cltv_expiry_delta
// Compare before overwriting so the caller learns whether anything actually changed.
5625 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5627 self.context.counterparty_forwarding_info = new_forwarding_info;
5633 /// Begins the shutdown process, getting a message for the remote peer and returning all
5634 /// holding cell HTLCs for payment failure.
5636 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5637 /// [`ChannelMonitorUpdate`] will be returned).
5638 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5639 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5640 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
// Refuse to start shutdown while any of our HTLCs is announced but not yet committed.
5642 for htlc in self.context.pending_outbound_htlcs.iter() {
5643 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5644 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
// Reject if either side already initiated shutdown.
5647 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5648 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5649 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5651 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5652 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
// A script may have been committed upfront (option_upfront_shutdown_script); it cannot
// be overridden later.
5655 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5656 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5658 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5659 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5660 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5663 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5664 // script is set, we just force-close and call it a day.
5665 let mut chan_closed = false;
5666 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
// Resolve the shutdown script: keep the committed one if set; otherwise take the
// caller's override or ask the signer provider for one, then validate compatibility
// against the peer's advertised features.
5670 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5672 None if !chan_closed => {
5673 // use override shutdown script if provided
5674 let shutdown_scriptpubkey = match override_shutdown_script {
5675 Some(script) => script,
5677 // otherwise, use the shutdown scriptpubkey provided by the signer
5678 match signer_provider.get_shutdown_scriptpubkey() {
5679 Ok(scriptpubkey) => scriptpubkey,
5680 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5684 if !shutdown_scriptpubkey.is_compatible(their_features) {
5685 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5687 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5693 // From here on out, we may not fail!
5694 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
// Unfunded channel: jump straight to ShutdownComplete and return a ShutdownResult.
5695 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5696 let shutdown_result = ShutdownResult {
5697 monitor_update: None,
5698 dropped_outbound_htlcs: Vec::new(),
5699 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5701 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5702 Some(shutdown_result)
// Funded channel: mark our shutdown as sent and continue the cooperative-close flow.
5704 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5707 self.context.update_time_counter += 1;
// If we just picked a (new) shutdown script, persist it in the channel monitor.
5709 let monitor_update = if update_shutdown_script {
5710 self.context.latest_monitor_update_id += 1;
5711 let monitor_update = ChannelMonitorUpdate {
5712 update_id: self.context.latest_monitor_update_id,
5713 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5714 scriptpubkey: self.get_closing_scriptpubkey(),
5717 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5718 self.push_ret_blockable_mon_update(monitor_update)
5720 let shutdown = msgs::Shutdown {
5721 channel_id: self.context.channel_id,
5722 scriptpubkey: self.get_closing_scriptpubkey(),
5725 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5726 // our shutdown until we've committed all of the pending changes.
5727 self.context.holding_cell_update_fee = None;
5728 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5729 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
// Collect queued AddHTLCs so the caller can fail them back to their sources.
5731 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5732 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5739 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5740 "we can't both complete shutdown and return a monitor update");
5742 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
/// Returns an iterator over the source and payment hash of every outbound HTLC that is
/// currently in flight on this channel: both HTLCs still queued in the holding cell and
/// those already in `pending_outbound_htlcs`.
5745 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5746 self.context.holding_cell_htlc_updates.iter()
5747 .flat_map(|htlc_update| {
// Only AddHTLC holding-cell entries represent in-flight outbound HTLCs.
5749 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5750 => Some((source, payment_hash)),
5754 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5758 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5759 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Channel state shared with funded channels.
5760 pub context: ChannelContext<SP>,
// Context used only while the channel remains unfunded - presumably dropped on
// promotion to a funded `Channel`; TODO confirm against the full file.
5761 pub unfunded_context: UnfundedChannelContext,
5764 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5765 pub fn new<ES: Deref, F: Deref>(
5766 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5767 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5768 outbound_scid_alias: u64
5769 ) -> Result<OutboundV1Channel<SP>, APIError>
5770 where ES::Target: EntropySource,
5771 F::Target: FeeEstimator
5773 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5774 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5775 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5776 let pubkeys = holder_signer.pubkeys().clone();
5778 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5779 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5781 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5782 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5784 let channel_value_msat = channel_value_satoshis * 1000;
5785 if push_msat > channel_value_msat {
5786 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5788 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5789 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5791 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5792 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5793 // Protocol level safety check in place, although it should never happen because
5794 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5795 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5798 let channel_type = Self::get_initial_channel_type(&config, their_features);
5799 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5801 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5802 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5804 (ConfirmationTarget::NonAnchorChannelFee, 0)
5806 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5808 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5809 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5810 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5811 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5814 let mut secp_ctx = Secp256k1::new();
5815 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5817 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5818 match signer_provider.get_shutdown_scriptpubkey() {
5819 Ok(scriptpubkey) => Some(scriptpubkey),
5820 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5824 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5825 if !shutdown_scriptpubkey.is_compatible(&their_features) {
5826 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5830 let destination_script = match signer_provider.get_destination_script() {
5831 Ok(script) => script,
5832 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5835 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
5838 context: ChannelContext {
5841 config: LegacyChannelConfig {
5842 options: config.channel_config.clone(),
5843 announced_channel: config.channel_handshake_config.announced_channel,
5844 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5849 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5851 channel_id: temporary_channel_id,
5852 temporary_channel_id: Some(temporary_channel_id),
5853 channel_state: ChannelState::OurInitSent as u32,
5854 announcement_sigs_state: AnnouncementSigsState::NotSent,
5856 channel_value_satoshis,
5858 latest_monitor_update_id: 0,
5860 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5861 shutdown_scriptpubkey,
5864 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5865 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5868 pending_inbound_htlcs: Vec::new(),
5869 pending_outbound_htlcs: Vec::new(),
5870 holding_cell_htlc_updates: Vec::new(),
5871 pending_update_fee: None,
5872 holding_cell_update_fee: None,
5873 next_holder_htlc_id: 0,
5874 next_counterparty_htlc_id: 0,
5875 update_time_counter: 1,
5877 resend_order: RAACommitmentOrder::CommitmentFirst,
5879 monitor_pending_channel_ready: false,
5880 monitor_pending_revoke_and_ack: false,
5881 monitor_pending_commitment_signed: false,
5882 monitor_pending_forwards: Vec::new(),
5883 monitor_pending_failures: Vec::new(),
5884 monitor_pending_finalized_fulfills: Vec::new(),
5886 signer_pending_commitment_update: false,
5887 signer_pending_funding: false,
5889 #[cfg(debug_assertions)]
5890 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5891 #[cfg(debug_assertions)]
5892 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5894 last_sent_closing_fee: None,
5895 pending_counterparty_closing_signed: None,
5896 closing_fee_limits: None,
5897 target_closing_feerate_sats_per_kw: None,
5899 funding_tx_confirmed_in: None,
5900 funding_tx_confirmation_height: 0,
5901 short_channel_id: None,
5902 channel_creation_height: current_chain_height,
5904 feerate_per_kw: commitment_feerate,
5905 counterparty_dust_limit_satoshis: 0,
5906 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
5907 counterparty_max_htlc_value_in_flight_msat: 0,
5908 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
5909 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
5910 holder_selected_channel_reserve_satoshis,
5911 counterparty_htlc_minimum_msat: 0,
5912 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
5913 counterparty_max_accepted_htlcs: 0,
5914 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
5915 minimum_depth: None, // Filled in in accept_channel
5917 counterparty_forwarding_info: None,
5919 channel_transaction_parameters: ChannelTransactionParameters {
5920 holder_pubkeys: pubkeys,
5921 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
5922 is_outbound_from_holder: true,
5923 counterparty_parameters: None,
5924 funding_outpoint: None,
5925 channel_type_features: channel_type.clone()
5927 funding_transaction: None,
5928 is_batch_funding: None,
5930 counterparty_cur_commitment_point: None,
5931 counterparty_prev_commitment_point: None,
5932 counterparty_node_id,
5934 counterparty_shutdown_scriptpubkey: None,
5936 commitment_secrets: CounterpartyCommitmentSecrets::new(),
5938 channel_update_status: ChannelUpdateStatus::Enabled,
5939 closing_signed_in_flight: false,
5941 announcement_sigs: None,
5943 #[cfg(any(test, fuzzing))]
5944 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
5945 #[cfg(any(test, fuzzing))]
5946 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
5948 workaround_lnd_bug_4006: None,
5949 sent_message_awaiting_response: None,
5951 latest_inbound_scid_alias: None,
5952 outbound_scid_alias,
5954 channel_pending_event_emitted: false,
5955 channel_ready_event_emitted: false,
5957 #[cfg(any(test, fuzzing))]
5958 historical_inbound_htlc_fulfills: HashSet::new(),
5963 blocked_monitor_updates: Vec::new(),
5965 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
// Builds the counterparty's initial commitment transaction and asks our holder
// signer for an ECDSA signature over it; the signature is what we send in the
// `funding_created` message.
// NOTE(review): the tail of this function is elided here — presumably the signer
// failure path maps to Err(()); confirm against the full file.
5969 fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ()> where L::Target: Logger {
5970 let counterparty_keys = self.context.build_remote_transaction_keys();
// cur_counterparty_commitment_transaction_number is still INITIAL_COMMITMENT_NUMBER
// at this point (the caller asserts this before invoking us).
5971 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5972 match &self.context.holder_signer {
5973 // TODO (taproot|arik): move match into calling method for Taproot
5974 ChannelSignerType::Ecdsa(ecdsa) => {
// An empty Vec is passed for the per-HTLC data — presumably because no HTLCs can
// exist pre-funding; confirm against the signer API.
5975 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
// Only the commitment signature is needed here; the (empty) HTLC signatures are dropped.
5976 .map(|(sig, _)| sig)
5981 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
5982 /// a funding_created message for the remote peer.
5983 /// Panics if called at some time other than immediately after initial handshake, if called twice,
5984 /// or if called on an inbound channel.
5985 /// Note that channel_id changes during this call!
5986 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
5987 /// If an Err is returned, it is a ChannelError::Close.
5988 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
5989 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
// Only the channel initiator (outbound side) funds the channel and sends funding_created.
5990 if !self.context.is_outbound() {
5991 panic!("Tried to create outbound funding_created message on an inbound channel!");
// State must be exactly OurInitSent | TheirInitSent: both init messages exchanged,
// nothing further (i.e. this has not been called before).
5993 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
5994 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
// Sanity: no commitment transactions may have been exchanged/advanced yet.
5996 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
5997 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
5998 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5999 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6002 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
// Inform the signer of the now-complete channel parameters (funding outpoint included).
6003 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6005 let temporary_channel_id = self.context.channel_id;
6007 // Now that we're past error-generating stuff, update our local state:
6009 self.context.channel_state = ChannelState::FundingCreated as u32;
// The real channel_id (derived from the funding outpoint) replaces the temporary one here.
6010 self.context.channel_id = funding_txo.to_channel_id();
6012 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6013 // We can skip this if it is a zero-conf channel.
6014 if funding_transaction.is_coin_base() &&
6015 self.context.minimum_depth.unwrap_or(0) > 0 &&
6016 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6017 self.context.minimum_depth = Some(COINBASE_MATURITY);
6020 self.context.funding_transaction = Some(funding_transaction);
// Some(()) iff this funding transaction funds multiple channels in one batch.
6021 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
// The signer may be unable to produce a signature right now; in that case we return
// None for the message and record that funding awaits the signer (see the else arm
// setting signer_pending_funding below).
6023 let funding_created = if let Ok(signature) = self.get_funding_created_signature(logger) {
6024 Some(msgs::FundingCreated {
6025 temporary_channel_id,
6026 funding_txid: funding_txo.txid,
6027 funding_output_index: funding_txo.index,
// Taproot-only fields, unused for ECDSA channels.
6030 partial_signature_with_nonce: None,
6032 next_local_nonce: None,
6035 self.context.signer_pending_funding = true;
// Consume self: the unfunded channel becomes a (funded-path) Channel.
6039 let channel = Channel {
6040 context: self.context,
6043 Ok((channel, funding_created))
// Chooses the channel type we propose in our initial `open_channel`, based on our
// config and the features the counterparty advertised in their `init` message.
6046 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6047 // The default channel type (ie the first one we try) depends on whether the channel is
6048 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6049 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6050 // with no other changes, and fall back to `only_static_remotekey`.
6051 let mut ret = ChannelTypeFeatures::only_static_remote_key();
// scid_privacy is only proposed for unannounced channels, and only when both our
// config requests it and the peer supports it.
6052 if !config.channel_handshake_config.announced_channel &&
6053 config.channel_handshake_config.negotiate_scid_privacy &&
6054 their_features.supports_scid_privacy() {
6055 ret.set_scid_privacy_required();
6058 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6059 // set it now. If they don't understand it, we'll fall back to our default of
6060 // `only_static_remotekey`.
6061 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6062 their_features.supports_anchors_zero_fee_htlc_tx() {
6063 ret.set_anchors_zero_fee_htlc_tx_required();
// NOTE(review): the elided tail presumably returns `ret` — confirm against the full file.
6069 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6070 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6071 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6072 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6073 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6074 ) -> Result<msgs::OpenChannel, ()>
6076 F::Target: FeeEstimator
// Retrying only makes sense for an outbound channel that is still waiting on
// the counterparty's accept_channel (state == OurInitSent).
6078 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
// If we are already proposing the bare default type there is nothing left to strip.
6079 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6080 // We've exhausted our options
6083 // We support opening a few different types of channels. Try removing our additional
6084 // features one by one until we've either arrived at our default or the counterparty has
6087 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6088 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6089 // checks whether the counterparty supports every feature, this would only happen if the
6090 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6092 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6093 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
// Dropping anchors changes the fee model, so re-fetch a non-anchor-channel feerate.
6094 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6095 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6096 } else if self.context.channel_type.supports_scid_privacy() {
6097 self.context.channel_type.clear_scid_privacy();
6099 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
// Keep the transaction-building parameters in sync with the downgraded channel type.
6101 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
// Re-issue open_channel with the reduced channel type.
6102 Ok(self.get_open_channel(chain_hash))
// Builds the `open_channel` message for this outbound, not-yet-accepted channel.
// Panics if called on an inbound channel or after the handshake has advanced.
6105 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6106 if !self.context.is_outbound() {
6107 panic!("Tried to open a channel for an inbound channel?");
6109 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6110 panic!("Cannot generate an open_channel after we've moved forward");
6113 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6114 panic!("Tried to send an open_channel for a channel that has already advanced");
6117 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx)
6118 let keys = self.context.get_holder_pubkeys();
// channel_id still holds the temporary id at this stage (replaced at funding time).
6122 temporary_channel_id: self.context.channel_id,
6123 funding_satoshis: self.context.channel_value_satoshis,
// What we push to the peer is the total value minus what we keep for ourselves.
6124 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6125 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6126 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6127 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6128 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6129 feerate_per_kw: self.context.feerate_per_kw as u32,
6130 to_self_delay: self.context.get_holder_selected_contest_delay(),
6131 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6132 funding_pubkey: keys.funding_pubkey,
6133 revocation_basepoint: keys.revocation_basepoint,
6134 payment_point: keys.payment_point,
6135 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6136 htlc_basepoint: keys.htlc_basepoint,
6137 first_per_commitment_point,
// Bit 0 of channel_flags signals announce_channel per BOLT 2.
6138 channel_flags: if self.context.config.announced_channel {1} else {0},
// Upfront shutdown script: ours if we committed to one, otherwise an empty script
// to signal opt-out.
6139 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6140 Some(script) => script.clone().into_inner(),
6141 None => Builder::new().into_script(),
6143 channel_type: Some(self.context.channel_type.clone()),
/// Handles the counterparty's `accept_channel` message: validates its fields against
/// protocol-level limits and our configured `ChannelHandshakeLimits`, then stores the
/// negotiated parameters and the counterparty's keys. On success the channel state
/// advances to OurInitSent | TheirInitSent, awaiting funding.
6148 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Any per-channel handshake-limit override takes precedence over the defaults.
6149 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6151 // Check sanity of message fields:
6152 if !self.context.is_outbound() {
6153 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6155 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6156 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
// 21000000 * 100000000 is the total bitcoin supply in satoshis.
6158 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6159 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6161 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6162 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6164 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6165 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6167 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6168 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6169 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
// The usable channel value excludes the reserve the peer requires us to keep.
6171 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6172 if msg.htlc_minimum_msat >= full_channel_value_msat {
6173 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6175 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6176 if msg.to_self_delay > max_delay_acceptable {
6177 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6179 if msg.max_accepted_htlcs < 1 {
6180 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6182 if msg.max_accepted_htlcs > MAX_HTLCS {
6183 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6186 // Now check against optional parameters as set by config...
6187 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6188 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6190 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6191 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6193 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6194 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6196 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6197 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6199 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6200 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6202 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6203 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6205 if msg.minimum_depth > peer_limits.max_minimum_depth {
6206 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
// A channel_type, if echoed back, must exactly match what we proposed in open_channel.
6209 if let Some(ty) = &msg.channel_type {
6210 if *ty != self.context.channel_type {
6211 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6213 } else if their_features.supports_channel_type() {
6214 // Assume they've accepted the channel type as they said they understand it.
// No channel_type and no channel-type support: fall back to the implied type from
// their init features, which we only allow to be only_static_remote_key.
6216 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6217 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6218 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6220 self.context.channel_type = channel_type.clone();
6221 self.context.channel_transaction_parameters.channel_type_features = channel_type;
// Upfront shutdown script handling: only meaningful if the peer signals the feature.
6224 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6225 match &msg.shutdown_scriptpubkey {
6226 &Some(ref script) => {
6227 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6228 if script.len() == 0 {
6231 if !script::is_bolt2_compliant(&script, their_features) {
6232 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6234 Some(script.clone())
6237 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6239 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed — record the peer's negotiated parameters.
6244 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
// Cap their in-flight limit at the full channel value.
6245 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6246 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6247 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6248 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
// Trusted 0-conf: accept their minimum_depth as-is (possibly 0); otherwise we
// require at least one confirmation.
6250 if peer_limits.trust_own_funding_0conf {
6251 self.context.minimum_depth = Some(msg.minimum_depth);
6253 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6256 let counterparty_pubkeys = ChannelPublicKeys {
6257 funding_pubkey: msg.funding_pubkey,
6258 revocation_basepoint: msg.revocation_basepoint,
6259 payment_point: msg.payment_point,
6260 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6261 htlc_basepoint: msg.htlc_basepoint
6264 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6265 selected_contest_delay: msg.to_self_delay,
6266 pubkeys: counterparty_pubkeys,
6269 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6270 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
// Handshake complete: both init messages are now accounted for.
6272 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6273 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6279 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6280 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Channel state/configuration shared with the funded `Channel` type.
6281 pub context: ChannelContext<SP>,
// Unfunded-phase bookkeeping (tracks `unfunded_channel_age_ticks` — presumably used
// to expire stale unfunded channels; confirm against the full file).
6282 pub unfunded_context: UnfundedChannelContext,
6285 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6286 /// Creates a new channel from a remote sides' request for one.
6287 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6288 pub fn new<ES: Deref, F: Deref, L: Deref>(
6289 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6290 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6291 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6292 current_chain_height: u32, logger: &L, is_0conf: bool,
6293 ) -> Result<InboundV1Channel<SP>, ChannelError>
6294 where ES::Target: EntropySource,
6295 F::Target: FeeEstimator,
6298 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6300 // First check the channel type is known, failing before we do anything else if we don't
6301 // support this channel type.
6302 let channel_type = if let Some(channel_type) = &msg.channel_type {
6303 if channel_type.supports_any_optional_bits() {
6304 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6307 // We only support the channel types defined by the `ChannelManager` in
6308 // `provided_channel_type_features`. The channel type must always support
6309 // `static_remote_key`.
6310 if !channel_type.requires_static_remote_key() {
6311 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6313 // Make sure we support all of the features behind the channel type.
6314 if !channel_type.is_subset(our_supported_features) {
6315 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6317 if channel_type.requires_scid_privacy() && announced_channel {
6318 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6320 channel_type.clone()
6322 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6323 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6324 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6329 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6330 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6331 let pubkeys = holder_signer.pubkeys().clone();
6332 let counterparty_pubkeys = ChannelPublicKeys {
6333 funding_pubkey: msg.funding_pubkey,
6334 revocation_basepoint: msg.revocation_basepoint,
6335 payment_point: msg.payment_point,
6336 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6337 htlc_basepoint: msg.htlc_basepoint
6340 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6341 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6344 // Check sanity of message fields:
6345 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6346 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6348 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6349 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6351 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6352 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6354 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6355 if msg.push_msat > full_channel_value_msat {
6356 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6358 if msg.dust_limit_satoshis > msg.funding_satoshis {
6359 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6361 if msg.htlc_minimum_msat >= full_channel_value_msat {
6362 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6364 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6366 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6367 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6368 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6370 if msg.max_accepted_htlcs < 1 {
6371 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6373 if msg.max_accepted_htlcs > MAX_HTLCS {
6374 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6377 // Now check against optional parameters as set by config...
6378 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6379 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6381 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6382 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6384 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6385 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6387 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6388 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6390 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6391 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6393 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6394 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6396 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6397 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6400 // Convert things into internal flags and prep our state:
6402 if config.channel_handshake_limits.force_announced_channel_preference {
6403 if config.channel_handshake_config.announced_channel != announced_channel {
6404 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6408 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6409 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6410 // Protocol level safety check in place, although it should never happen because
6411 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6412 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6414 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6415 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6417 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6418 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6419 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6421 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6422 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6425 // check if the funder's amount for the initial commitment tx is sufficient
6426 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6427 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6428 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6432 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6433 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6434 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6435 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6438 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6439 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6440 // want to push much to us), our counterparty should always have more than our reserve.
6441 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6442 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6445 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6446 match &msg.shutdown_scriptpubkey {
6447 &Some(ref script) => {
6448 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6449 if script.len() == 0 {
6452 if !script::is_bolt2_compliant(&script, their_features) {
6453 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6455 Some(script.clone())
6458 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6460 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6465 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6466 match signer_provider.get_shutdown_scriptpubkey() {
6467 Ok(scriptpubkey) => Some(scriptpubkey),
6468 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6472 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6473 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6474 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6478 let destination_script = match signer_provider.get_destination_script() {
6479 Ok(script) => script,
6480 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6483 let mut secp_ctx = Secp256k1::new();
6484 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6486 let minimum_depth = if is_0conf {
6489 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6493 context: ChannelContext {
6496 config: LegacyChannelConfig {
6497 options: config.channel_config.clone(),
6499 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6504 inbound_handshake_limits_override: None,
6506 temporary_channel_id: Some(msg.temporary_channel_id),
6507 channel_id: msg.temporary_channel_id,
6508 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6509 announcement_sigs_state: AnnouncementSigsState::NotSent,
6512 latest_monitor_update_id: 0,
6514 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6515 shutdown_scriptpubkey,
6518 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6519 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6520 value_to_self_msat: msg.push_msat,
6522 pending_inbound_htlcs: Vec::new(),
6523 pending_outbound_htlcs: Vec::new(),
6524 holding_cell_htlc_updates: Vec::new(),
6525 pending_update_fee: None,
6526 holding_cell_update_fee: None,
6527 next_holder_htlc_id: 0,
6528 next_counterparty_htlc_id: 0,
6529 update_time_counter: 1,
6531 resend_order: RAACommitmentOrder::CommitmentFirst,
6533 monitor_pending_channel_ready: false,
6534 monitor_pending_revoke_and_ack: false,
6535 monitor_pending_commitment_signed: false,
6536 monitor_pending_forwards: Vec::new(),
6537 monitor_pending_failures: Vec::new(),
6538 monitor_pending_finalized_fulfills: Vec::new(),
6540 signer_pending_commitment_update: false,
6541 signer_pending_funding: false,
6543 #[cfg(debug_assertions)]
6544 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6545 #[cfg(debug_assertions)]
6546 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6548 last_sent_closing_fee: None,
6549 pending_counterparty_closing_signed: None,
6550 closing_fee_limits: None,
6551 target_closing_feerate_sats_per_kw: None,
6553 funding_tx_confirmed_in: None,
6554 funding_tx_confirmation_height: 0,
6555 short_channel_id: None,
6556 channel_creation_height: current_chain_height,
6558 feerate_per_kw: msg.feerate_per_kw,
6559 channel_value_satoshis: msg.funding_satoshis,
6560 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6561 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6562 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6563 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6564 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6565 holder_selected_channel_reserve_satoshis,
6566 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6567 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6568 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6569 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6572 counterparty_forwarding_info: None,
6574 channel_transaction_parameters: ChannelTransactionParameters {
6575 holder_pubkeys: pubkeys,
6576 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6577 is_outbound_from_holder: false,
6578 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6579 selected_contest_delay: msg.to_self_delay,
6580 pubkeys: counterparty_pubkeys,
6582 funding_outpoint: None,
6583 channel_type_features: channel_type.clone()
6585 funding_transaction: None,
6586 is_batch_funding: None,
6588 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6589 counterparty_prev_commitment_point: None,
6590 counterparty_node_id,
6592 counterparty_shutdown_scriptpubkey,
6594 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6596 channel_update_status: ChannelUpdateStatus::Enabled,
6597 closing_signed_in_flight: false,
6599 announcement_sigs: None,
6601 #[cfg(any(test, fuzzing))]
6602 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6603 #[cfg(any(test, fuzzing))]
6604 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6606 workaround_lnd_bug_4006: None,
6607 sent_message_awaiting_response: None,
6609 latest_inbound_scid_alias: None,
6610 outbound_scid_alias: 0,
6612 channel_pending_event_emitted: false,
6613 channel_ready_event_emitted: false,
6615 #[cfg(any(test, fuzzing))]
6616 historical_inbound_htlc_fulfills: HashSet::new(),
6621 blocked_monitor_updates: Vec::new(),
6623 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6629 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6630 /// should be sent back to the counterparty node.
6632 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6633 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6634 if self.context.is_outbound() {
6635 panic!("Tried to send accept_channel for an outbound channel?");
6637 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6638 panic!("Tried to send accept_channel after channel had moved forward");
6640 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6641 panic!("Tried to send an accept_channel for a channel that has already advanced");
6644 self.generate_accept_channel_message()
6647 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6648 /// inbound channel. If the intention is to accept an inbound channel, use
6649 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6651 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6652 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6653 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6654 let keys = self.context.get_holder_pubkeys();
6656 msgs::AcceptChannel {
6657 temporary_channel_id: self.context.channel_id,
6658 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6659 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6660 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6661 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6662 minimum_depth: self.context.minimum_depth.unwrap(),
6663 to_self_delay: self.context.get_holder_selected_contest_delay(),
6664 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6665 funding_pubkey: keys.funding_pubkey,
6666 revocation_basepoint: keys.revocation_basepoint,
6667 payment_point: keys.payment_point,
6668 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6669 htlc_basepoint: keys.htlc_basepoint,
6670 first_per_commitment_point,
6671 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6672 Some(script) => script.clone().into_inner(),
6673 None => Builder::new().into_script(),
6675 channel_type: Some(self.context.channel_type.clone()),
6677 next_local_nonce: None,
6681 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6682 /// inbound channel without accepting it.
6684 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6686 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6687 self.generate_accept_channel_message()
6690 fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
6691 let funding_script = self.context.get_funding_redeemscript();
6693 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6694 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6696 let trusted_tx = initial_commitment_tx.trust();
6697 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6698 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6699 // They sign the holder commitment transaction...
6700 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6701 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6702 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6703 encode::serialize_hex(&funding_script), &self.context.channel_id());
6704 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6707 let counterparty_keys = self.context.build_remote_transaction_keys();
6708 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6710 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6711 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6712 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6713 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6715 match &self.context.holder_signer {
6716 // TODO (arik): move match into calling method for Taproot
6717 ChannelSignerType::Ecdsa(ecdsa) => {
6718 let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
6719 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
6721 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
6722 Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
6727 pub fn funding_created<L: Deref>(
6728 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6729 ) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
6733 if self.context.is_outbound() {
6734 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6736 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6737 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6738 // remember the channel, so it's safe to just send an error_message here and drop the
6740 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6742 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6743 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6744 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6745 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6748 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6749 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6750 // This is an externally observable change before we finish all our checks. In particular
6751 // funding_created_signature may fail.
6752 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6754 let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
6756 Err(ChannelError::Close(e)) => {
6757 self.context.channel_transaction_parameters.funding_outpoint = None;
6758 return Err((self, ChannelError::Close(e)));
6761 // The only error we know how to handle is ChannelError::Close, so we fall over here
6762 // to make sure we don't continue with an inconsistent state.
6763 panic!("unexpected error type from funding_created_signature {:?}", e);
6767 let holder_commitment_tx = HolderCommitmentTransaction::new(
6768 initial_commitment_tx,
6771 &self.context.get_holder_pubkeys().funding_pubkey,
6772 self.context.counterparty_funding_pubkey()
6775 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6776 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6779 // Now that we're past error-generating stuff, update our local state:
6781 let funding_redeemscript = self.context.get_funding_redeemscript();
6782 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6783 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6784 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6785 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6786 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6787 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6788 shutdown_script, self.context.get_holder_selected_contest_delay(),
6789 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6790 &self.context.channel_transaction_parameters,
6791 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6793 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6795 channel_monitor.provide_initial_counterparty_commitment_tx(
6796 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6797 self.context.cur_counterparty_commitment_transaction_number,
6798 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6799 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6800 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6802 self.context.channel_state = ChannelState::FundingSent as u32;
6803 self.context.channel_id = funding_txo.to_channel_id();
6804 self.context.cur_counterparty_commitment_transaction_number -= 1;
6805 self.context.cur_holder_commitment_transaction_number -= 1;
6807 log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
6809 // Promote the channel to a full-fledged one now that we have updated the state and have a
6810 // `ChannelMonitor`.
6811 let mut channel = Channel {
6812 context: self.context,
6814 let channel_id = channel.context.channel_id.clone();
6815 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6816 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6818 Ok((channel, msgs::FundingSigned {
6822 partial_signature_with_nonce: None,
6823 }, channel_monitor))
// Current `Channel` serialization format version, and the oldest version we are still able
// to deserialize.
const SERIALIZATION_VERSION: u8 = 3;
const MIN_SERIALIZATION_VERSION: u8 = 2;
6830 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6836 impl Writeable for ChannelUpdateStatus {
6837 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6838 // We only care about writing out the current state as it was announced, ie only either
6839 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6840 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
6842 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6843 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6844 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6845 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6851 impl Readable for ChannelUpdateStatus {
6852 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6853 Ok(match <u8 as Readable>::read(reader)? {
6854 0 => ChannelUpdateStatus::Enabled,
6855 1 => ChannelUpdateStatus::Disabled,
6856 _ => return Err(DecodeError::InvalidValue),
6861 impl Writeable for AnnouncementSigsState {
6862 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6863 // We only care about writing out the current state as if we had just disconnected, at
6864 // which point we always set anything but AnnouncementSigsReceived to NotSent.
6866 AnnouncementSigsState::NotSent => 0u8.write(writer),
6867 AnnouncementSigsState::MessageSent => 0u8.write(writer),
6868 AnnouncementSigsState::Committed => 0u8.write(writer),
6869 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6874 impl Readable for AnnouncementSigsState {
6875 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6876 Ok(match <u8 as Readable>::read(reader)? {
6877 0 => AnnouncementSigsState::NotSent,
6878 1 => AnnouncementSigsState::PeerReceived,
6879 _ => return Err(DecodeError::InvalidValue),
6884 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
6885 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6886 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6889 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6891 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6892 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6893 // the low bytes now and the optional high bytes later.
6894 let user_id_low = self.context.user_id as u64;
6895 user_id_low.write(writer)?;
6897 // Version 1 deserializers expected to read parts of the config object here. Version 2
6898 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6899 // `minimum_depth` we simply write dummy values here.
6900 writer.write_all(&[0; 8])?;
6902 self.context.channel_id.write(writer)?;
6903 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6904 self.context.channel_value_satoshis.write(writer)?;
6906 self.context.latest_monitor_update_id.write(writer)?;
6908 let mut key_data = VecWriter(Vec::new());
6909 // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
6910 self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
6911 assert!(key_data.0.len() < core::usize::MAX);
6912 assert!(key_data.0.len() < core::u32::MAX as usize);
6913 (key_data.0.len() as u32).write(writer)?;
6914 writer.write_all(&key_data.0[..])?;
6916 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6917 // deserialized from that format.
6918 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6919 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6920 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6922 self.context.destination_script.write(writer)?;
6924 self.context.cur_holder_commitment_transaction_number.write(writer)?;
6925 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
6926 self.context.value_to_self_msat.write(writer)?;
6928 let mut dropped_inbound_htlcs = 0;
6929 for htlc in self.context.pending_inbound_htlcs.iter() {
6930 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
6931 dropped_inbound_htlcs += 1;
6934 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
6935 for htlc in self.context.pending_inbound_htlcs.iter() {
6936 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
6939 htlc.htlc_id.write(writer)?;
6940 htlc.amount_msat.write(writer)?;
6941 htlc.cltv_expiry.write(writer)?;
6942 htlc.payment_hash.write(writer)?;
6944 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
6945 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
6947 htlc_state.write(writer)?;
6949 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
6951 htlc_state.write(writer)?;
6953 &InboundHTLCState::Committed => {
6956 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
6958 removal_reason.write(writer)?;
6963 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
6964 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
6966 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
6967 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
6968 htlc.htlc_id.write(writer)?;
6969 htlc.amount_msat.write(writer)?;
6970 htlc.cltv_expiry.write(writer)?;
6971 htlc.payment_hash.write(writer)?;
6972 htlc.source.write(writer)?;
6974 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
6976 onion_packet.write(writer)?;
6978 &OutboundHTLCState::Committed => {
6981 &OutboundHTLCState::RemoteRemoved(_) => {
6982 // Treat this as a Committed because we haven't received the CS - they'll
6983 // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
6986 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
6988 if let OutboundHTLCOutcome::Success(preimage) = outcome {
6989 preimages.push(preimage);
6991 let reason: Option<&HTLCFailReason> = outcome.into();
6992 reason.write(writer)?;
6994 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
6996 if let OutboundHTLCOutcome::Success(preimage) = outcome {
6997 preimages.push(preimage);
6999 let reason: Option<&HTLCFailReason> = outcome.into();
7000 reason.write(writer)?;
7003 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7004 if pending_outbound_skimmed_fees.is_empty() {
7005 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7007 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7008 } else if !pending_outbound_skimmed_fees.is_empty() {
7009 pending_outbound_skimmed_fees.push(None);
7013 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7014 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7015 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7017 &HTLCUpdateAwaitingACK::AddHTLC {
7018 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7022 amount_msat.write(writer)?;
7023 cltv_expiry.write(writer)?;
7024 payment_hash.write(writer)?;
7025 source.write(writer)?;
7026 onion_routing_packet.write(writer)?;
7028 if let Some(skimmed_fee) = skimmed_fee_msat {
7029 if holding_cell_skimmed_fees.is_empty() {
7030 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7032 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7033 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7035 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7037 payment_preimage.write(writer)?;
7038 htlc_id.write(writer)?;
7040 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7042 htlc_id.write(writer)?;
7043 err_packet.write(writer)?;
7048 match self.context.resend_order {
7049 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7050 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7053 self.context.monitor_pending_channel_ready.write(writer)?;
7054 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7055 self.context.monitor_pending_commitment_signed.write(writer)?;
7057 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7058 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7059 pending_forward.write(writer)?;
7060 htlc_id.write(writer)?;
7063 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7064 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7065 htlc_source.write(writer)?;
7066 payment_hash.write(writer)?;
7067 fail_reason.write(writer)?;
7070 if self.context.is_outbound() {
7071 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7072 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7073 Some(feerate).write(writer)?;
7075 // As for inbound HTLCs, if the update was only announced and never committed in a
7076 // commitment_signed, drop it.
7077 None::<u32>.write(writer)?;
7079 self.context.holding_cell_update_fee.write(writer)?;
7081 self.context.next_holder_htlc_id.write(writer)?;
7082 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7083 self.context.update_time_counter.write(writer)?;
7084 self.context.feerate_per_kw.write(writer)?;
7086 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7087 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7088 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7089 // consider the stale state on reload.
7092 self.context.funding_tx_confirmed_in.write(writer)?;
7093 self.context.funding_tx_confirmation_height.write(writer)?;
7094 self.context.short_channel_id.write(writer)?;
7096 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7097 self.context.holder_dust_limit_satoshis.write(writer)?;
7098 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7100 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7101 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7103 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7104 self.context.holder_htlc_minimum_msat.write(writer)?;
7105 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7107 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7108 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7110 match &self.context.counterparty_forwarding_info {
7113 info.fee_base_msat.write(writer)?;
7114 info.fee_proportional_millionths.write(writer)?;
7115 info.cltv_expiry_delta.write(writer)?;
7117 None => 0u8.write(writer)?
7120 self.context.channel_transaction_parameters.write(writer)?;
7121 self.context.funding_transaction.write(writer)?;
7123 self.context.counterparty_cur_commitment_point.write(writer)?;
7124 self.context.counterparty_prev_commitment_point.write(writer)?;
7125 self.context.counterparty_node_id.write(writer)?;
7127 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7129 self.context.commitment_secrets.write(writer)?;
7131 self.context.channel_update_status.write(writer)?;
7133 #[cfg(any(test, fuzzing))]
7134 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7135 #[cfg(any(test, fuzzing))]
7136 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7137 htlc.write(writer)?;
7140 // If the channel type is something other than only-static-remote-key, then we need to have
7141 // older clients fail to deserialize this channel at all. If the type is
7142 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7144 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7145 Some(&self.context.channel_type) } else { None };
7147 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7148 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7149 // a different percentage of the channel value then 10%, which older versions of LDK used
7150 // to set it to before the percentage was made configurable.
7151 let serialized_holder_selected_reserve =
7152 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7153 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7155 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7156 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7157 let serialized_holder_htlc_max_in_flight =
7158 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7159 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7161 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7162 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7164 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7165 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7166 // we write the high bytes as an option here.
7167 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7169 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7171 write_tlv_fields!(writer, {
7172 (0, self.context.announcement_sigs, option),
7173 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7174 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7175 // them twice, once with their original default values above, and once as an option
7176 // here. On the read side, old versions will simply ignore the odd-type entries here,
7177 // and new versions map the default values to None and allow the TLV entries here to
7179 (1, self.context.minimum_depth, option),
7180 (2, chan_type, option),
7181 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7182 (4, serialized_holder_selected_reserve, option),
7183 (5, self.context.config, required),
7184 (6, serialized_holder_htlc_max_in_flight, option),
7185 (7, self.context.shutdown_scriptpubkey, option),
7186 (8, self.context.blocked_monitor_updates, optional_vec),
7187 (9, self.context.target_closing_feerate_sats_per_kw, option),
7188 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7189 (13, self.context.channel_creation_height, required),
7190 (15, preimages, required_vec),
7191 (17, self.context.announcement_sigs_state, required),
7192 (19, self.context.latest_inbound_scid_alias, option),
7193 (21, self.context.outbound_scid_alias, required),
7194 (23, channel_ready_event_emitted, option),
7195 (25, user_id_high_opt, option),
7196 (27, self.context.channel_keys_id, required),
7197 (28, holder_max_accepted_htlcs, option),
7198 (29, self.context.temporary_channel_id, option),
7199 (31, channel_pending_event_emitted, option),
7200 (35, pending_outbound_skimmed_fees, optional_vec),
7201 (37, holding_cell_skimmed_fees, optional_vec),
7202 (38, self.context.is_batch_funding, option),
7209 const MAX_ALLOC_SIZE: usize = 64*1024;
// Deserializes a `Channel` previously written by the `Writeable` impl above. Args are
// (entropy source, signer provider, best-block height at serialization time, the set of
// channel-type features we support). Reads the fixed legacy fields first, then a TLV
// stream whose entries override the legacy defaults.
impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
	ES::Target: EntropySource,
	SP::Target: SignerProvider
	fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
		let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
		let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
		// the low bytes now and the high bytes later.
		let user_id_low: u64 = Readable::read(reader)?;
		let mut config = Some(LegacyChannelConfig::default());
		// Read the old serialization of the ChannelConfig from version 0.0.98.
		config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
		config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
		config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
		config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
		// Read the 8 bytes of backwards-compatibility ChannelConfig data.
		let mut _val: u64 = Readable::read(reader)?;
		// Fixed (non-TLV) fields, in the exact order the writer emitted them.
		let channel_id = Readable::read(reader)?;
		let channel_state = Readable::read(reader)?;
		let channel_value_satoshis = Readable::read(reader)?;
		let latest_monitor_update_id = Readable::read(reader)?;
		let mut keys_data = None;
		// Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
		// the `channel_keys_id` TLV is present below.
		let keys_len: u32 = Readable::read(reader)?;
		// Cap the initial allocation at MAX_ALLOC_SIZE; the loop below grows the buffer in
		// bounded steps so a bogus `keys_len` cannot force a giant allocation up front.
		keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
		while keys_data.as_ref().unwrap().len() != keys_len as usize {
			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
			let mut data = [0; 1024];
			let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
			reader.read_exact(read_slice)?;
			keys_data.as_mut().unwrap().extend_from_slice(read_slice);
		// Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
		let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
			Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
		let destination_script = Readable::read(reader)?;
		let cur_holder_commitment_transaction_number = Readable::read(reader)?;
		let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
		let value_to_self_msat = Readable::read(reader)?;
		let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
		// Pre-size to at most DEFAULT_MAX_HTLCS; the count itself is untrusted.
		let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..pending_inbound_htlc_count {
			pending_inbound_htlcs.push(InboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				// One-byte discriminant selects the HTLC state; unknown values are a hard
				// decode failure rather than being silently skipped.
				state: match <u8 as Readable>::read(reader)? {
					1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
					2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
					3 => InboundHTLCState::Committed,
					4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
					_ => return Err(DecodeError::InvalidValue),
		let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
		let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..pending_outbound_htlc_count {
			pending_outbound_htlcs.push(OutboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				source: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
					1 => OutboundHTLCState::Committed,
						// Removal states carry an optional failure reason; `None` means success
						// (the preimage, if any, is restored from the `preimages` TLV below).
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::RemoteRemoved(option.into())
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
					_ => return Err(DecodeError::InvalidValue),
				// Not serialized in the legacy section; filled in from TLV 35 below if present.
				skimmed_fee_msat: None,
		let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
		let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
		for _ in 0..holding_cell_htlc_update_count {
			holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
				0 => HTLCUpdateAwaitingACK::AddHTLC {
					amount_msat: Readable::read(reader)?,
					cltv_expiry: Readable::read(reader)?,
					payment_hash: Readable::read(reader)?,
					source: Readable::read(reader)?,
					onion_routing_packet: Readable::read(reader)?,
					// Filled in from TLV 37 below if present.
					skimmed_fee_msat: None,
				1 => HTLCUpdateAwaitingACK::ClaimHTLC {
					payment_preimage: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
				2 => HTLCUpdateAwaitingACK::FailHTLC {
					htlc_id: Readable::read(reader)?,
					err_packet: Readable::read(reader)?,
				_ => return Err(DecodeError::InvalidValue),
		let resend_order = match <u8 as Readable>::read(reader)? {
			0 => RAACommitmentOrder::CommitmentFirst,
			1 => RAACommitmentOrder::RevokeAndACKFirst,
			_ => return Err(DecodeError::InvalidValue),
		let monitor_pending_channel_ready = Readable::read(reader)?;
		let monitor_pending_revoke_and_ack = Readable::read(reader)?;
		let monitor_pending_commitment_signed = Readable::read(reader)?;
		let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_forwards_count {
			monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
		let monitor_pending_failures_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_failures_count {
			monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
		// Only the feerate is serialized; the `FeeUpdateState` is reconstructed below once we
		// know whether we are the channel funder.
		let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
		let holding_cell_update_fee = Readable::read(reader)?;
		let next_holder_htlc_id = Readable::read(reader)?;
		let next_counterparty_htlc_id = Readable::read(reader)?;
		let update_time_counter = Readable::read(reader)?;
		let feerate_per_kw = Readable::read(reader)?;
		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		match <u8 as Readable>::read(reader)? {
				// Discard the legacy (feerate, fee, signature) triple.
				let _: u32 = Readable::read(reader)?;
				let _: u64 = Readable::read(reader)?;
				let _: Signature = Readable::read(reader)?;
			_ => return Err(DecodeError::InvalidValue),
		let funding_tx_confirmed_in = Readable::read(reader)?;
		let funding_tx_confirmation_height = Readable::read(reader)?;
		let short_channel_id = Readable::read(reader)?;
		let counterparty_dust_limit_satoshis = Readable::read(reader)?;
		let holder_dust_limit_satoshis = Readable::read(reader)?;
		let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
		let mut counterparty_selected_channel_reserve_satoshis = None;
			// Read the old serialization from version 0.0.98.
			counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
			// Read the 8 bytes of backwards-compatibility data.
			let _dummy: u64 = Readable::read(reader)?;
		let counterparty_htlc_minimum_msat = Readable::read(reader)?;
		let holder_htlc_minimum_msat = Readable::read(reader)?;
		let counterparty_max_accepted_htlcs = Readable::read(reader)?;
		let mut minimum_depth = None;
			// Read the old serialization from version 0.0.98.
			minimum_depth = Some(Readable::read(reader)?);
			// Read the 4 bytes of backwards-compatibility data.
			let _dummy: u32 = Readable::read(reader)?;
		let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
			1 => Some(CounterpartyForwardingInfo {
				fee_base_msat: Readable::read(reader)?,
				fee_proportional_millionths: Readable::read(reader)?,
				cltv_expiry_delta: Readable::read(reader)?,
			_ => return Err(DecodeError::InvalidValue),
		// Mutable: its `channel_type_features` is overridden below from the negotiated type.
		let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
		let funding_transaction: Option<Transaction> = Readable::read(reader)?;
		let counterparty_cur_commitment_point = Readable::read(reader)?;
		let counterparty_prev_commitment_point = Readable::read(reader)?;
		let counterparty_node_id = Readable::read(reader)?;
		let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
		let commitment_secrets = Readable::read(reader)?;
		let channel_update_status = Readable::read(reader)?;
		#[cfg(any(test, fuzzing))]
		let mut historical_inbound_htlc_fulfills = HashSet::new();
		#[cfg(any(test, fuzzing))]
			let htlc_fulfills_len: u64 = Readable::read(reader)?;
			for _ in 0..htlc_fulfills_len {
				// Duplicate entries would indicate a corrupted serialization (test/fuzz only).
				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
		// Rebuild the fee-update state: if we're the funder the pending update is outbound,
		// otherwise we were waiting on the counterparty's revocation.
		let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
			Some((feerate, if channel_parameters.is_outbound_from_holder {
				FeeUpdateState::Outbound
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce
		// Defaults for TLV-carried fields, matching what old (pre-TLV) versions implied.
		let mut announcement_sigs = None;
		let mut target_closing_feerate_sats_per_kw = None;
		let mut monitor_pending_finalized_fulfills = Some(Vec::new());
		let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
		let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
		// Prior to supporting channel type negotiation, all of our channels were static_remotekey
		// only, so we default to that if none was written.
		let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
		let mut channel_creation_height = Some(serialized_height);
		let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
		// If we read an old Channel, for simplicity we just treat it as "we never sent an
		// AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
		let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
		let mut latest_inbound_scid_alias = None;
		let mut outbound_scid_alias = None;
		let mut channel_pending_event_emitted = None;
		let mut channel_ready_event_emitted = None;
		let mut user_id_high_opt: Option<u64> = None;
		let mut channel_keys_id: Option<[u8; 32]> = None;
		let mut temporary_channel_id: Option<ChannelId> = None;
		let mut holder_max_accepted_htlcs: Option<u16> = None;
		let mut blocked_monitor_updates = Some(Vec::new());
		let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
		let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
		let mut is_batch_funding: Option<()> = None;
		// TLV entries override the legacy defaults above. Type numbers must match the write
		// side exactly; odd types are optional for old readers, even types are required.
		read_tlv_fields!(reader, {
			(0, announcement_sigs, option),
			(1, minimum_depth, option),
			(2, channel_type, option),
			(3, counterparty_selected_channel_reserve_satoshis, option),
			(4, holder_selected_channel_reserve_satoshis, option),
			(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
			(6, holder_max_htlc_value_in_flight_msat, option),
			(7, shutdown_scriptpubkey, option),
			(8, blocked_monitor_updates, optional_vec),
			(9, target_closing_feerate_sats_per_kw, option),
			(11, monitor_pending_finalized_fulfills, optional_vec),
			(13, channel_creation_height, option),
			(15, preimages_opt, optional_vec),
			(17, announcement_sigs_state, option),
			(19, latest_inbound_scid_alias, option),
			(21, outbound_scid_alias, option),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, channel_keys_id, option),
			(28, holder_max_accepted_htlcs, option),
			(29, temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees_opt, optional_vec),
			(37, holding_cell_skimmed_fees_opt, optional_vec),
			(38, is_batch_funding, option),
		// Prefer re-deriving the signer from `channel_keys_id` (modern serialization); fall
		// back to deserializing the raw signer bytes read earlier for old serializations.
		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
			let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
			// If we've gotten to the funding stage of the channel, populate the signer with its
			// required channel parameters.
			let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
			if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
				holder_signer.provide_channel_parameters(&channel_parameters);
			(channel_keys_id, holder_signer)
			// `keys_data` can be `None` if we had corrupted data.
			let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
			let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
			(holder_signer.channel_keys_id(), holder_signer)
		// Re-attach fulfill preimages (TLV 15) to the outbound HTLCs in a success-removal
		// state; the preimage vec must line up one-to-one with such HTLCs.
		if let Some(preimages) = preimages_opt {
			let mut iter = preimages.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
			// We expect all preimages to be consumed above
			if iter.next().is_some() {
				return Err(DecodeError::InvalidValue);
		let chan_features = channel_type.as_ref().unwrap();
		if !chan_features.is_subset(our_supported_features) {
			// If the channel was written by a new version and negotiated with features we don't
			// understand yet, refuse to read it.
			return Err(DecodeError::UnknownRequiredFeature);
		// ChannelTransactionParameters may have had an empty features set upon deserialization.
		// To account for that, we're proactively setting/overriding the field here.
		channel_parameters.channel_type_features = chan_features.clone();
		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
		// `user_id` used to be a single u64 value. In order to remain backwards
		// compatible with versions prior to 0.0.113, the u128 is serialized as two
		// separate u64 values.
		let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
		// Skimmed-fee vecs (TLVs 35/37) must line up one-to-one with the HTLC lists read above.
		if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
			let mut iter = skimmed_fees.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
			// We expect all skimmed fees to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
			let mut iter = skimmed_fees.into_iter();
			for htlc in holding_cell_htlc_updates.iter_mut() {
				if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
					*skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
			// We expect all skimmed fees to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		// Reassemble the full ChannelContext from everything read/derived above.
			context: ChannelContext {
				config: config.unwrap(),
				// Note that we don't care about serializing handshake limits as we only ever serialize
				// channel data after the handshake has completed.
				inbound_handshake_limits_override: None,
				temporary_channel_id,
				announcement_sigs_state: announcement_sigs_state.unwrap(),
				channel_value_satoshis,
				latest_monitor_update_id,
				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				cur_holder_commitment_transaction_number,
				cur_counterparty_commitment_transaction_number,
				holder_max_accepted_htlcs,
				pending_inbound_htlcs,
				pending_outbound_htlcs,
				holding_cell_htlc_updates,
				monitor_pending_channel_ready,
				monitor_pending_revoke_and_ack,
				monitor_pending_commitment_signed,
				monitor_pending_forwards,
				monitor_pending_failures,
				monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
				// Transient signer state is never serialized; start with nothing pending.
				signer_pending_commitment_update: false,
				signer_pending_funding: false,
				holding_cell_update_fee,
				next_holder_htlc_id,
				next_counterparty_htlc_id,
				update_time_counter,
				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((0, 0)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
				// Closing-fee negotiation restarts on reconnect, so stale state is dropped.
				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw,
				funding_tx_confirmed_in,
				funding_tx_confirmation_height,
				channel_creation_height: channel_creation_height.unwrap(),
				counterparty_dust_limit_satoshis,
				holder_dust_limit_satoshis,
				counterparty_max_htlc_value_in_flight_msat,
				holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
				counterparty_selected_channel_reserve_satoshis,
				holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
				counterparty_htlc_minimum_msat,
				holder_htlc_minimum_msat,
				counterparty_max_accepted_htlcs,
				counterparty_forwarding_info,
				channel_transaction_parameters: channel_parameters,
				funding_transaction,
				counterparty_cur_commitment_point,
				counterparty_prev_commitment_point,
				counterparty_node_id,
				counterparty_shutdown_scriptpubkey,
				channel_update_status,
				closing_signed_in_flight: false,
				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,
				latest_inbound_scid_alias,
				// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
				outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
				// Missing event flags mean an old serialization: assume the events were emitted.
				channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
				channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills,
				channel_type: channel_type.unwrap(),
				blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7719 use bitcoin::blockdata::constants::ChainHash;
7720 use bitcoin::blockdata::script::{Script, Builder};
7721 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7722 use bitcoin::blockdata::opcodes;
7723 use bitcoin::network::constants::Network;
7725 use crate::ln::PaymentHash;
7726 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7727 use crate::ln::channel::InitFeatures;
7728 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7729 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7730 use crate::ln::features::ChannelTypeFeatures;
7731 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7732 use crate::ln::script::ShutdownScript;
7733 use crate::ln::chan_utils;
7734 use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7735 use crate::chain::BestBlock;
7736 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7737 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7738 use crate::chain::transaction::OutPoint;
7739 use crate::routing::router::Path;
7740 use crate::util::config::UserConfig;
7741 use crate::util::errors::APIError;
7742 use crate::util::test_utils;
7743 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7744 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7745 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7746 use bitcoin::secp256k1::{SecretKey,PublicKey};
7747 use bitcoin::hashes::sha256::Hash as Sha256;
7748 use bitcoin::hashes::Hash;
7749 use bitcoin::hash_types::WPubkeyHash;
7750 use bitcoin::PackedLockTime;
7751 use bitcoin::util::address::WitnessVersion;
7752 use crate::prelude::*;
	// Trivial fee estimator for tests: ignores the confirmation target and returns a fixed
	// feerate. NOTE(review): the stored field and return expression are elided here —
	// presumably it returns the configured `fee_est`; confirm against the full file.
	struct TestFeeEstimator {
	impl FeeEstimator for TestFeeEstimator {
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
	// Sanity-checks the funding-cap constants: the total-supply constant equals 21M BTC in
	// satoshis, and the non-wumbo funding cap never exceeds it.
	fn test_max_funding_satoshis_no_wumbo() {
		assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
		assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
			"MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
	fn test_no_fee_check_overflow() {
		// Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
		// arithmetic, causing a panic with debug assertions enabled.
		let fee_est = TestFeeEstimator { fee_est: 42 };
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
		// u32::MAX must be rejected as too high, not panic on overflow.
		assert!(Channel::<&TestKeysInterface>::check_remote_fee(
			&ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
			u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
		// Test `SignerProvider`/`EntropySource` backed by a single fixed `InMemorySigner`:
		// deterministic "randomness", hard-coded destination/shutdown scripts, and panicking
		// `read_chan_signer` (tests never deserialize a signer through this harness).
		signer: InMemorySigner,
	impl EntropySource for Keys {
		// Deterministic zero bytes so tests are reproducible.
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	impl SignerProvider for Keys {
		type Signer = InMemorySigner;
		// Always hands out the one wrapped signer's keys id, regardless of channel params.
		fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
			self.signer.channel_keys_id()
		fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
		// Deliberately unimplemented: tests using this harness never read serialized signers.
		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
		// P2WPKH script for a fixed private key.
		fn get_destination_script(&self) -> Result<Script, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
		// P2WPKH shutdown script for the same fixed private key.
		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
	// Helper for the static test vectors: derives the public key for a hex-encoded secret key.
	// Panics on invalid hex/keys, which is fine in test code.
	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
	// Opening a channel must fail with `IncompatibleShutdownScript` when our signer hands back
	// an upfront shutdown script (V16 witness program) the peer's features don't allow
	// (anysegwit support is explicitly cleared from the peer's init features).
	fn upfront_shutdown_script_incompatibility() {
		let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
		let non_v0_segwit_shutdown_script =
			ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		// Force the keys interface to return the incompatible script on the next request.
		keys_provider.expect(OnGetShutdownScriptpubkey {
			returns: non_v0_segwit_shutdown_script.clone(),
		let secp_ctx = Secp256k1::new();
		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
			// The error must carry back the exact offending script.
			Err(APIError::IncompatibleShutdownScript { script }) => {
				assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
			Err(e) => panic!("Unexpected error: {:?}", e),
			Ok(_) => panic!("Expected error"),
	// Check that, during channel creation, we use the same feerate in the open channel message
	// as we do in the Channel object creation itself.
	fn test_open_channel_msg_fee() {
		let original_fee = 253;
		let mut fee_est = TestFeeEstimator{fee_est: original_fee };
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		// The channel captures the feerate at construction time...
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
		// Now change the fee so we can check that the fee in the open_channel message is the
		// same as the old fee.
		fee_est.fee_est = 500;
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		// ...so the later estimator change must not leak into the open_channel message.
		assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);
		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.
		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		// Force A's own dust limit above B's so the two sides disagree.
		node_a_chan.context.holder_dust_limit_satoshis = 1560;
		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
		// Node B --> Node A: funding signed
		let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			skimmed_fee_msat: None,
		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		// Dust HTLCs add no weight: the fee matches a zero-HTLC commitment tx.
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
// Verifies that the per-direction dust thresholds use the correct second-stage tx weight:
// offered HTLCs use the timeout-tx weight, received HTLCs the success-tx weight. Each of
// the four probes below sits 1 sat above/below the relevant threshold and checks whether
// the projected commitment fee counts the HTLC as dust (0-HTLC fee) or not (1-HTLC fee).
7955 fn test_timeout_vs_success_htlc_dust_limit() {
7956 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
7957 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
7958 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
7959 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
7960 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
7961 let secp_ctx = Secp256k1::new();
7962 let seed = [42; 32];
7963 let network = Network::Testnet;
7964 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7966 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7967 let config = UserConfig::default();
7968 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Baseline commitment fees for zero and one non-dust HTLC at the channel's feerate.
7970 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
7971 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
7973 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
7974 // counted as dust when it shouldn't be.
// Locally-offered HTLC 1 sat above the holder's timeout-based dust threshold => non-dust.
7975 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
7976 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7977 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7978 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
7980 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
// Remotely-offered HTLC 1 sat below the holder's success-based dust threshold => dust.
7981 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
7982 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7983 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7984 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Flip perspective: we are now the fundee, so the remote (funder) pays the commitment fee
// and the counterparty's dust limit applies on their commitment transaction.
7986 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
7988 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
// On the remote commitment our offered HTLC is *their* received HTLC, yet the timeout
// weight still governs the offerer's threshold; 1 sat above it but checked as dust here.
7989 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
7990 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7991 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7992 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7994 // If swapped: this HTLC would be counted as dust when it shouldn't be.
7995 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
7996 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7997 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7998 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// Runs a full A<->B open/funding handshake, then disconnects both sides and checks that
// each node's `channel_reestablish` message carries sane commitment numbers and a zeroed
// last-per-commitment-secret (nothing has been revoked yet at commitment number 0).
8002 fn channel_reestablish_no_updates() {
8003 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8004 let logger = test_utils::TestLogger::new();
8005 let secp_ctx = Secp256k1::new();
8006 let seed = [42; 32];
8007 let network = Network::Testnet;
8008 let best_block = BestBlock::from_network(network);
8009 let chain_hash = ChainHash::using_genesis_block(network);
8010 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8012 // Go through the flow of opening a channel between two nodes.
8014 // Create Node A's channel pointing to Node B's pubkey
8015 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8016 let config = UserConfig::default();
8017 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8019 // Create Node B's channel by receiving Node A's open_channel message
8020 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8021 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8022 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8024 // Node B --> Node A: accept channel
8025 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8026 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8028 // Node A --> Node B: funding created
// The funding tx only needs a single output paying the 2-of-2 funding redeemscript.
8029 let output_script = node_a_chan.context.get_funding_redeemscript();
8030 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8031 value: 10000000, script_pubkey: output_script.clone(),
8033 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8034 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8035 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8037 // Node B --> Node A: funding signed
8038 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8040 // Now disconnect the two nodes and check that the commitment point in
8041 // Node B's channel_reestablish message is sane.
8042 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8043 let msg = node_b_chan.get_channel_reestablish(&&logger);
8044 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8045 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
// No commitment has been revoked yet, so the "last secret" field must be all-zero.
8046 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8048 // Check that the commitment point in Node A's channel_reestablish message
8050 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8051 let msg = node_a_chan.get_channel_reestablish(&&logger);
8052 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8053 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8054 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Checks that `holder_max_htlc_value_in_flight_msat` is derived from the configured
// `max_inbound_htlc_value_in_flight_percent_of_channel` for both outbound and inbound
// channel constructors, and that out-of-range configs are clamped to [1%, 100%].
8058 fn test_configured_holder_max_htlc_value_in_flight() {
8059 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8060 let logger = test_utils::TestLogger::new();
8061 let secp_ctx = Secp256k1::new();
8062 let seed = [42; 32];
8063 let network = Network::Testnet;
8064 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8065 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8066 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Four configs: two in-range (2%, 99%) and two out-of-range (0%, 101%) to exercise clamping.
8068 let mut config_2_percent = UserConfig::default();
8069 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8070 let mut config_99_percent = UserConfig::default();
8071 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8072 let mut config_0_percent = UserConfig::default();
8073 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8074 let mut config_101_percent = UserConfig::default();
8075 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8077 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8078 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8079 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8080 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
8081 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8082 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8084 // Test with the upper bound - 1 of valid values (99%).
8085 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
8086 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8087 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Reuse chan_1's open_channel message to drive all the inbound-side constructions below.
8089 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8091 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8092 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8093 // which is set to the lower bound - 1 (2%) of the `channel_value`.
8094 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8095 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8096 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8098 // Test with the upper bound - 1 of valid values (99%).
8099 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8100 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8101 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8103 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8104 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8105 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
8106 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8107 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8109 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8110 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8112 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
8113 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8114 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8116 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8117 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8118 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8119 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8120 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8122 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8123 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8125 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8126 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8127 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
// Driver test: exercises the channel-reserve helper below across valid, clamped, and
// invalid (sum >= 100%) reserve-percentage combinations. The helper asserts success or
// negotiation failure internally based on whether the percentages sum below 1.0.
8131 fn test_configured_holder_selected_channel_reserve_satoshis() {
8133 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8134 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8135 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8137 // Test with valid but unreasonably high channel reserves
8138 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8139 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8140 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8142 // Test with calculated channel reserve less than lower bound
8143 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8144 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8146 // Test with invalid channel reserves since sum of both is greater than or equal
8148 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8149 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Test helper: opens an outbound channel with `outbound_selected_channel_reserve_perc`
// configured (as proportional-millionths), checks the holder-selected reserve is the
// configured fraction floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS`, then feeds the
// resulting open_channel to an inbound peer configured with
// `inbound_selected_channel_reserve_perc`. If the two percentages sum to >= 1.0 the
// inbound constructor must reject the channel; otherwise both sides' reserves are checked.
8152 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8153 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8154 let logger = test_utils::TestLogger::new();
8155 let secp_ctx = Secp256k1::new();
8156 let seed = [42; 32];
8157 let network = Network::Testnet;
8158 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8159 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8160 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8163 let mut outbound_node_config = UserConfig::default();
// Convert the fractional percentage into the proportional-millionths config unit.
8164 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8165 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
8167 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8168 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8170 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8171 let mut inbound_node_config = UserConfig::default();
8172 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
// Reserves summing to >= 100% of the channel value cannot be satisfied by either side.
8174 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8175 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8177 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8179 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8180 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8182 // Channel Negotiations failed
8183 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8184 assert!(result.is_err());
// Opens and funds an A<->B channel, then applies a counterparty `channel_update` message
// and checks that (a) its forwarding parameters are stored in the channel's
// counterparty_forwarding_info, (b) our own holder_htlc_minimum_msat is untouched, and
// (c) re-applying the identical update returns false (no change).
8189 fn channel_update() {
8190 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8191 let logger = test_utils::TestLogger::new();
8192 let secp_ctx = Secp256k1::new();
8193 let seed = [42; 32];
8194 let network = Network::Testnet;
8195 let best_block = BestBlock::from_network(network);
8196 let chain_hash = ChainHash::using_genesis_block(network);
8197 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8199 // Create Node A's channel pointing to Node B's pubkey
8200 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8201 let config = UserConfig::default();
8202 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8204 // Create Node B's channel by receiving Node A's open_channel message
8205 // Make sure A's dust limit is as we expect.
8206 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8207 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8208 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8210 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8211 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8212 accept_channel_msg.dust_limit_satoshis = 546;
8213 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Override A's dust limit after the handshake so the test's dust assumptions hold.
8214 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8216 // Node A --> Node B: funding created
8217 let output_script = node_a_chan.context.get_funding_redeemscript();
8218 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8219 value: 10000000, script_pubkey: output_script.clone(),
8221 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8222 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8223 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8225 // Node B --> Node A: funding signed
8226 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8228 // Make sure that receiving a channel update will update the Channel as expected.
// The signature is a dummy (never verified here); only the contents fields matter.
8229 let update = ChannelUpdate {
8230 contents: UnsignedChannelUpdate {
8232 short_channel_id: 0,
8235 cltv_expiry_delta: 100,
8236 htlc_minimum_msat: 5,
8237 htlc_maximum_msat: MAX_VALUE_MSAT,
8239 fee_proportional_millionths: 11,
8240 excess_data: Vec::new(),
8242 signature: Signature::from(unsafe { FFISignature::new() })
// First application changes state => returns true.
8244 assert!(node_a_chan.channel_update(&update).unwrap());
8246 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8247 // change our official htlc_minimum_msat.
8248 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8249 match node_a_chan.context.counterparty_forwarding_info() {
8251 assert_eq!(info.cltv_expiry_delta, 100);
// NOTE(review): fee_base_msat is asserted as 110 but the field is not visible in the
// struct literal above — presumably set on an elided line; confirm against the full file.
8252 assert_eq!(info.fee_base_msat, 110);
8253 assert_eq!(info.fee_proportional_millionths, 11);
8255 None => panic!("expected counterparty forwarding info to be Some")
// Re-applying the identical update is a no-op => returns false.
8258 assert!(!node_a_chan.channel_update(&update).unwrap());
8261 #[cfg(feature = "_test_vectors")]
8263 fn outbound_commitment_test() {
8264 use bitcoin::util::sighash;
8265 use bitcoin::consensus::encode::serialize;
8266 use bitcoin::blockdata::transaction::EcdsaSighashType;
8267 use bitcoin::hashes::hex::FromHex;
8268 use bitcoin::hash_types::Txid;
8269 use bitcoin::secp256k1::Message;
8270 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
8271 use crate::ln::PaymentPreimage;
8272 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8273 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8274 use crate::util::logger::Logger;
8275 use crate::sync::Arc;
8277 // Test vectors from BOLT 3 Appendices C and F (anchors):
8278 let feeest = TestFeeEstimator{fee_est: 15000};
8279 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8280 let secp_ctx = Secp256k1::new();
8282 let mut signer = InMemorySigner::new(
8284 SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8285 SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8286 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8287 SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8288 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8290 // These aren't set in the test vectors:
8291 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8297 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8298 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8299 let keys_provider = Keys { signer: signer.clone() };
8301 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8302 let mut config = UserConfig::default();
8303 config.channel_handshake_config.announced_channel = false;
8304 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8305 chan.context.holder_dust_limit_satoshis = 546;
8306 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8308 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8310 let counterparty_pubkeys = ChannelPublicKeys {
8311 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8312 revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8313 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8314 delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8315 htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8317 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8318 CounterpartyChannelTransactionParameters {
8319 pubkeys: counterparty_pubkeys.clone(),
8320 selected_contest_delay: 144
8322 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8323 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8325 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8326 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8328 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8329 hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8331 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8332 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8334 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8335 // derived from a commitment_seed, so instead we copy it here and call
8336 // build_commitment_transaction.
8337 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8338 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8339 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8340 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8341 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8343 macro_rules! test_commitment {
8344 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8345 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8346 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8350 macro_rules! test_commitment_with_anchors {
8351 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8352 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8353 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8357 macro_rules! test_commitment_common {
8358 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8359 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8361 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8362 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8364 let htlcs = commitment_stats.htlcs_included.drain(..)
8365 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8367 (commitment_stats.tx, htlcs)
8369 let trusted_tx = commitment_tx.trust();
8370 let unsigned_tx = trusted_tx.built_transaction();
8371 let redeemscript = chan.context.get_funding_redeemscript();
8372 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8373 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8374 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8375 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8377 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8378 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8379 let mut counterparty_htlc_sigs = Vec::new();
8380 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8382 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8383 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8384 counterparty_htlc_sigs.push(remote_signature);
8386 assert_eq!(htlcs.len(), per_htlc.len());
8388 let holder_commitment_tx = HolderCommitmentTransaction::new(
8389 commitment_tx.clone(),
8390 counterparty_signature,
8391 counterparty_htlc_sigs,
8392 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8393 chan.context.counterparty_funding_pubkey()
8395 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8396 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8398 let funding_redeemscript = chan.context.get_funding_redeemscript();
8399 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8400 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8402 // ((htlc, counterparty_sig), (index, holder_sig))
8403 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8406 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8407 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8409 let ref htlc = htlcs[$htlc_idx];
8410 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8411 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8412 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8413 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8414 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8415 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8416 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8418 let mut preimage: Option<PaymentPreimage> = None;
8421 let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8422 if out == htlc.payment_hash {
8423 preimage = Some(PaymentPreimage([i; 32]));
8427 assert!(preimage.is_some());
8430 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8431 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8432 channel_derivation_parameters: ChannelDerivationParameters {
8433 value_satoshis: chan.context.channel_value_satoshis,
8434 keys_id: chan.context.channel_keys_id,
8435 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8437 commitment_txid: trusted_tx.txid(),
8438 per_commitment_number: trusted_tx.commitment_number(),
8439 per_commitment_point: trusted_tx.per_commitment_point(),
8440 feerate_per_kw: trusted_tx.feerate_per_kw(),
8442 preimage: preimage.clone(),
8443 counterparty_sig: *htlc_counterparty_sig,
8444 }, &secp_ctx).unwrap();
8445 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8446 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8448 let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8449 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8450 let trusted_tx = holder_commitment_tx.trust();
8451 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8452 log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8453 assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8455 assert!(htlc_counterparty_sig_iter.next().is_none());
8459 // anchors: simple commitment tx with no HTLCs and single anchor
8460 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8461 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8462 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8464 // simple commitment tx with no HTLCs
8465 chan.context.value_to_self_msat = 7000000000;
8467 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8468 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8469 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8471 // anchors: simple commitment tx with no HTLCs
8472 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8473 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8474 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8476 chan.context.pending_inbound_htlcs.push({
8477 let mut out = InboundHTLCOutput{
8479 amount_msat: 1000000,
8481 payment_hash: PaymentHash([0; 32]),
8482 state: InboundHTLCState::Committed,
8484 out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8487 chan.context.pending_inbound_htlcs.push({
8488 let mut out = InboundHTLCOutput{
8490 amount_msat: 2000000,
8492 payment_hash: PaymentHash([0; 32]),
8493 state: InboundHTLCState::Committed,
8495 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8498 chan.context.pending_outbound_htlcs.push({
8499 let mut out = OutboundHTLCOutput{
8501 amount_msat: 2000000,
8503 payment_hash: PaymentHash([0; 32]),
8504 state: OutboundHTLCState::Committed,
8505 source: HTLCSource::dummy(),
8506 skimmed_fee_msat: None,
8508 out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8511 chan.context.pending_outbound_htlcs.push({
8512 let mut out = OutboundHTLCOutput{
8514 amount_msat: 3000000,
8516 payment_hash: PaymentHash([0; 32]),
8517 state: OutboundHTLCState::Committed,
8518 source: HTLCSource::dummy(),
8519 skimmed_fee_msat: None,
8521 out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8524 chan.context.pending_inbound_htlcs.push({
8525 let mut out = InboundHTLCOutput{
8527 amount_msat: 4000000,
8529 payment_hash: PaymentHash([0; 32]),
8530 state: InboundHTLCState::Committed,
8532 out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8536 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8537 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8538 chan.context.feerate_per_kw = 0;
8540 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8541 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8542 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8545 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8546 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8547 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8550 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8551 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8552 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8555 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8556 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8557 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8560 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8561 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8562 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8565 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8566 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8567 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8570 // commitment tx with seven outputs untrimmed (maximum feerate)
8571 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8572 chan.context.feerate_per_kw = 647;
8574 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8575 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8576 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8579 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8580 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8581 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8584 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8585 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8586 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8589 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8590 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8591 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8594 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8595 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8596 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8599 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8600 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8601 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8604 // commitment tx with six outputs untrimmed (minimum feerate)
8605 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8606 chan.context.feerate_per_kw = 648;
8608 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8609 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8610 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8613 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8614 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8615 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8618 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8619 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8620 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8623 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8624 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8625 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8628 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8629 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8630 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8633 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8634 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8635 chan.context.feerate_per_kw = 645;
8636 chan.context.holder_dust_limit_satoshis = 1001;
8638 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8639 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8640 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8643 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8644 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8645 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8648 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8649 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8650 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8653 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8654 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8655 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8658 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8659 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8660 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8663 // commitment tx with six outputs untrimmed (maximum feerate)
// Section setup for the next test_commitment! vector: the highest feerate at
// which all six commitment outputs stay untrimmed. Presumably mirrors the
// BOLT 3 Appendix C vector of the same name -- confirm against the spec.
8664 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8665 chan.context.feerate_per_kw = 2069;
// Reset the holder dust limit back to the default used by the non-anchors
// vectors (a preceding anchors section raises it).
8666 chan.context.holder_dust_limit_satoshis = 546;
8668 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8669 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8670 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8673 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8674 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8675 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8678 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8679 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8680 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8683 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8684 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8685 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8688 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8689 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8690 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8693 // commitment tx with five outputs untrimmed (minimum feerate)
// Bumping feerate_per_kw from 2069 to 2070 trims one HTLC output below the
// dust limit, dropping the commitment tx from six outputs to five.
8694 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8695 chan.context.feerate_per_kw = 2070;
8697 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8698 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8699 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8702 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8703 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8704 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8707 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8708 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8709 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8712 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8713 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8714 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8717 // commitment tx with five outputs untrimmed (maximum feerate)
// Highest feerate (2194 sat/kw) at which the commitment still carries five
// outputs; the balance split is unchanged from the previous section.
8718 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8719 chan.context.feerate_per_kw = 2194;
8721 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8722 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8723 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8726 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8727 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8728 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8731 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8732 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8733 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8736 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8737 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8738 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8741 // commitment tx with three outputs untrimmed boundary -- see note below.
// One sat/kw above the previous section's maximum (2194 -> 2195) trims
// another HTLC, leaving four outputs on the commitment tx.
8742 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8743 chan.context.feerate_per_kw = 2195;
8745 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8746 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8747 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8750 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8751 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8752 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8755 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8756 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8757 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8760 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
// Variant of the four-output vector with the anchors-zero-fee-HTLC channel
// type enabled and the smallest dust limit at which four outputs survive.
8761 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8762 chan.context.feerate_per_kw = 2185;
8763 chan.context.holder_dust_limit_satoshis = 2001;
// Save the current (non-anchors) channel type so later sections can restore
// it after the anchors-specific vectors below.
8764 let cached_channel_type = chan.context.channel_type;
8765 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8767 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8768 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8769 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8772 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8773 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8774 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8777 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8778 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8779 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8782 // commitment tx with four outputs untrimmed (maximum feerate)
// Highest feerate (3702 sat/kw) keeping four outputs untrimmed. Also undoes
// the preceding anchors section's changes: dust limit back to the default
// 546 sats and channel type restored from the saved non-anchors value.
8783 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8784 chan.context.feerate_per_kw = 3702;
8785 chan.context.holder_dust_limit_satoshis = 546;
8786 chan.context.channel_type = cached_channel_type.clone();
8788 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8789 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8790 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8793 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8794 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8795 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8798 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8799 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8800 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8803 // commitment tx with three outputs untrimmed (minimum feerate)
// One sat/kw above the four-output maximum (3702 -> 3703) trims another
// HTLC, leaving three outputs on the commitment tx.
8804 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8805 chan.context.feerate_per_kw = 3703;
8807 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8808 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8809 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8812 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8813 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8814 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8817 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
// Anchors variant of the three-output vector: slightly lower feerate than
// the non-anchors case and the smallest dust limit at which three outputs
// survive with the anchors-zero-fee-HTLC channel type.
8818 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8819 chan.context.feerate_per_kw = 3687;
8820 chan.context.holder_dust_limit_satoshis = 3001;
8821 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8823 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8824 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8825 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8828 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8829 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8830 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8833 // commitment tx with three outputs untrimmed (maximum feerate)
8834 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8835 chan.context.feerate_per_kw = 4914;
8836 chan.context.holder_dust_limit_satoshis = 546;
8837 chan.context.channel_type = cached_channel_type.clone();
8839 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8840 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8841 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8844 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8845 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8846 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8849 // commitment tx with two outputs untrimmed (minimum feerate)
8850 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8851 chan.context.feerate_per_kw = 4915;
8852 chan.context.holder_dust_limit_satoshis = 546;
8854 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8855 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8856 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8858 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8859 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8860 chan.context.feerate_per_kw = 4894;
8861 chan.context.holder_dust_limit_satoshis = 4001;
8862 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8864 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8865 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8866 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8868 // commitment tx with two outputs untrimmed (maximum feerate)
8869 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8870 chan.context.feerate_per_kw = 9651180;
8871 chan.context.holder_dust_limit_satoshis = 546;
8872 chan.context.channel_type = cached_channel_type.clone();
8874 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8875 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8876 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8878 // commitment tx with one output untrimmed (minimum feerate)
8879 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8880 chan.context.feerate_per_kw = 9651181;
8882 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8883 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8884 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8886 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8887 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8888 chan.context.feerate_per_kw = 6216010;
8889 chan.context.holder_dust_limit_satoshis = 4001;
8890 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8892 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8893 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8894 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8896 // commitment tx with fee greater than funder amount
8897 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8898 chan.context.feerate_per_kw = 9651936;
8899 chan.context.holder_dust_limit_satoshis = 546;
8900 chan.context.channel_type = cached_channel_type;
8902 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8903 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8904 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8906 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8907 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8908 chan.context.feerate_per_kw = 253;
8909 chan.context.pending_inbound_htlcs.clear();
8910 chan.context.pending_inbound_htlcs.push({
8911 let mut out = InboundHTLCOutput{
8913 amount_msat: 2000000,
8915 payment_hash: PaymentHash([0; 32]),
8916 state: InboundHTLCState::Committed,
8918 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8921 chan.context.pending_outbound_htlcs.clear();
8922 chan.context.pending_outbound_htlcs.push({
8923 let mut out = OutboundHTLCOutput{
8925 amount_msat: 5000001,
8927 payment_hash: PaymentHash([0; 32]),
8928 state: OutboundHTLCState::Committed,
8929 source: HTLCSource::dummy(),
8930 skimmed_fee_msat: None,
8932 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8935 chan.context.pending_outbound_htlcs.push({
8936 let mut out = OutboundHTLCOutput{
8938 amount_msat: 5000000,
8940 payment_hash: PaymentHash([0; 32]),
8941 state: OutboundHTLCState::Committed,
8942 source: HTLCSource::dummy(),
8943 skimmed_fee_msat: None,
8945 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8949 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8950 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8951 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8954 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8955 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8956 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8958 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8959 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8960 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8962 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8963 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8964 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8967 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8968 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8969 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8970 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8973 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8974 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8975 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8977 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8978 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8979 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8981 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8982 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8983 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8988 fn test_per_commitment_secret_gen() {
8989 // Test vectors from BOLT 3 Appendix D:
// Each case loads a fixed 32-byte seed and asks `build_commitment_secret` for
// the secret at a given commitment index, comparing the result against the
// per-commitment secret published in the spec.
8991 let mut seed = [0; 32];
// Case: all-zero seed at index 281474976710655 (= 2^48 - 1, the highest
// possible commitment index).
8992 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8993 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8994 hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// Case: all-ones (0xFF..FF) seed at the same highest index.
8996 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8997 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8998 hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Same 0xFF..FF seed with the two alternating-bit index patterns
// (0xaaaaaaaaaaa and 0x555555555555), exercising both bit phases of the index.
9000 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9001 hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9003 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9004 hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Case: 0x01-repeated seed at index 1, near the low end of the index space.
9006 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9007 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9008 hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9012 fn test_key_derivation() {
9013 // Test vectors from BOLT 3 Appendix E:
9014 let secp_ctx = Secp256k1::new();
// Fixed base secret and per-commitment secret taken from the spec's
// Appendix E setup; all derivations below start from these two values.
9016 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9017 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// Sanity-check the public points corresponding to the two secrets.
9019 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9020 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9022 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9023 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// Public key derived from the basepoint and the per-commitment point
// (the spec's "localpubkey"-style derivation).
9025 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
9026 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
// The matching private key, derived from the per-commitment point and the
// base secret; must correspond to the public key checked above.
9028 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9029 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// Revocation public key, derived from the per-commitment point and the
// (revocation) basepoint.
9031 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
9032 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// Revocation private key — note this derivation requires BOTH secrets
// (per-commitment secret and base secret), unlike the public-key form.
9034 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9035 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9039 fn test_zero_conf_channel_type_support() {
// Verifies that an open_channel message carrying an explicit channel_type with
// the zero-conf bit set is accepted by `InboundV1Channel::new`.
9040 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9041 let secp_ctx = Secp256k1::new();
9042 let seed = [42; 32];
9043 let network = Network::Testnet;
9044 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9045 let logger = test_utils::TestLogger::new();
// Node A opens an outbound channel towards node B using default config.
9047 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9048 let config = UserConfig::default();
9049 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9050 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Build a channel type of `static_remote_key` plus the zero-conf requirement.
9052 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9053 channel_type_features.set_zero_conf_required();
// Patch A's open_channel message to carry that explicit channel type, then let
// B process it as an inbound open. Note is_0conf=false is passed on the
// acceptor side; the zero-conf request arrives purely via the channel_type.
9055 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9056 open_channel_msg.channel_type = Some(channel_type_features);
9057 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9058 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9059 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9060 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
// The inbound side must accept the zero-conf channel type.
9061 assert!(res.is_ok());
9065 fn test_supports_anchors_zero_htlc_tx_fee() {
9066 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9067 // resulting `channel_type`.
9068 let secp_ctx = Secp256k1::new();
9069 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9070 let network = Network::Testnet;
9071 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9072 let logger = test_utils::TestLogger::new();
9074 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9075 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Config that opts node A (and later node B) into anchors-zero-fee-HTLC-tx
// negotiation.
9077 let mut config = UserConfig::default();
9078 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9080 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9081 // need to signal it.
// Phase 1: the counterparty's init features come from a default config (no
// anchors support), so the negotiated channel type must NOT include anchors.
9082 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9083 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9084 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9087 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
// Phase 2: with both sides using the anchors-enabled config, the expected
// negotiated type is static_remote_key + anchors_zero_fee_htlc_tx.
9089 let mut expected_channel_type = ChannelTypeFeatures::empty();
9090 expected_channel_type.set_static_remote_key_required();
9091 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9093 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9094 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9095 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
// Feed A's open_channel to B and confirm both ends settle on the same
// anchors channel type.
9098 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9099 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9100 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9101 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9102 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9105 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9106 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9110 fn test_rejects_implicit_simple_anchors() {
9111 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9112 // each side's `InitFeatures`, it is rejected.
9113 let secp_ctx = Secp256k1::new();
9114 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9115 let network = Network::Testnet;
9116 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9117 let logger = test_utils::TestLogger::new();
9119 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9120 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9122 let config = UserConfig::default();
9124 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-build InitFeatures whose raw bits require `option_static_remotekey`
// (bit 12) and legacy `option_anchors` (bit 20) — the "simple anchors"
// combination LDK does not support.
9125 let static_remote_key_required: u64 = 1 << 12;
9126 let simple_anchors_required: u64 = 1 << 20;
9127 let raw_init_features = static_remote_key_required | simple_anchors_required;
9128 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9130 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9131 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9132 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9135 // Set `channel_type` to `None` to force the implicit feature negotiation.
9136 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9137 open_channel_msg.channel_type = None;
9139 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9140 // `static_remote_key`, it will fail the channel.
// B processes the open with the simple-anchors init features; the implicit
// negotiation must produce an error rather than a channel.
9141 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9142 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9143 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9144 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9146 assert!(channel_b.is_err());
9150 fn test_rejects_simple_anchors_channel_type() {
9151 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
// NOTE(review): the doc comment above ends mid-sentence — presumably it continued
// "...it is rejected", since every path below asserts rejection; confirm upstream.
// This test covers two rejection paths for the legacy (non-zero-fee-HTLC-tx)
// `option_anchors` channel type: (1) an inbound open_channel explicitly requesting
// it, and (2) a malicious counterparty downgrading the channel type in its
// accept_channel message.
//
// Harness setup: secp context, fixed-rate fee estimator, testnet key material, and
// a test logger shared by both simulated peers.
9153 let secp_ctx = Secp256k1::new();
9154 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9155 let network = Network::Testnet;
9156 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9157 let logger = test_utils::TestLogger::new();
// Deterministic node ids for the two sides of the handshake.
9159 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9160 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9162 let config = UserConfig::default();
9164 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Build a raw feature set requiring `static_remote_key` (bit 12) plus the legacy
// `option_anchors` (bit 20) — anchors *without* zero-fee HTLC transactions, the
// variant this test expects LDK to reject.
9165 let static_remote_key_required: u64 = 1 << 12;
9166 let simple_anchors_required: u64 = 1 << 20;
9167 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9168 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9169 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
// Sanity-check that neither encoding sets bits LDK treats as unknown, so the
// rejections asserted below come from channel-type logic rather than from the
// generic unknown-required-bits check.
9170 assert!(!simple_anchors_init.requires_unknown_bits());
9171 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9173 // First, we'll try to open a channel between A and B where A requests a channel type for
9174 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9175 // B as it's not supported by LDK.
9176 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9177 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9178 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
// NOTE(review): the closing `).unwrap();` of this constructor call (and of the
// similar calls below) is not visible in this excerpt — gaps in the residual line
// numbering suggest lines were elided; verify against the full file.
9181 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
// Force the legacy anchors channel type into the outgoing open_channel message.
9182 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
// B (the inbound side) must refuse to accept a channel of this type.
9184 let res = InboundV1Channel::<&TestKeysInterface>::new(
9185 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9186 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9187 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9189 assert!(res.is_err());
9191 // Then, we'll try to open another channel where A requests a channel type for
9192 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9193 // original `option_anchors` feature, which should be rejected by A as it's not supported by
// NOTE(review): the comment above also ends mid-sentence (presumably "...by LDK.").
9195 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9196 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9197 10000000, 100000, 42, &config, 0, 42
9200 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9202 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9203 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9204 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9205 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Simulate the malicious downgrade: B rewrites the negotiated channel type in its
// accept_channel back to the legacy anchors variant.
9208 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9209 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
// A must reject the downgraded accept_channel.
9211 let res = channel_a.accept_channel(
9212 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9214 assert!(res.is_err());
9218 fn test_waiting_for_batch() {
9219 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9220 let logger = test_utils::TestLogger::new();
9221 let secp_ctx = Secp256k1::new();
9222 let seed = [42; 32];
9223 let network = Network::Testnet;
9224 let best_block = BestBlock::from_network(network);
9225 let chain_hash = ChainHash::using_genesis_block(network);
9226 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9228 let mut config = UserConfig::default();
9229 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9230 // channel in a batch before all channels are ready.
9231 config.channel_handshake_limits.trust_own_funding_0conf = true;
9233 // Create a channel from node a to node b that will be part of batch funding.
9234 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9235 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9240 &channelmanager::provided_init_features(&config),
9249 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9250 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9251 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9256 &channelmanager::provided_channel_type_features(&config),
9257 &channelmanager::provided_init_features(&config),
9263 true, // Allow node b to send a 0conf channel_ready.
9266 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9267 node_a_chan.accept_channel(
9268 &accept_channel_msg,
9269 &config.channel_handshake_limits,
9270 &channelmanager::provided_init_features(&config),
9273 // Fund the channel with a batch funding transaction.
9274 let output_script = node_a_chan.context.get_funding_redeemscript();
9275 let tx = Transaction {
9277 lock_time: PackedLockTime::ZERO,
9281 value: 10000000, script_pubkey: output_script.clone(),
9284 value: 10000000, script_pubkey: Builder::new().into_script(),
9287 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9288 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9293 ).map_err(|_| ()).unwrap();
9294 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9295 &funding_created_msg.unwrap(),
9299 ).map_err(|_| ()).unwrap();
9300 let node_b_updates = node_b_chan.monitor_updating_restored(
9308 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9309 // broadcasting the funding transaction until the batch is ready.
9310 let _ = node_a_chan.funding_signed(
9311 &funding_signed_msg,
9316 let node_a_updates = node_a_chan.monitor_updating_restored(
9323 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9324 // as the funding transaction depends on all channels in the batch becoming ready.
9325 assert!(node_a_updates.channel_ready.is_none());
9326 assert!(node_a_updates.funding_broadcastable.is_none());
9328 node_a_chan.context.channel_state,
9329 ChannelState::FundingSent as u32 |
9330 ChannelState::WaitingForBatch as u32,
9333 // It is possible to receive a 0conf channel_ready from the remote node.
9334 node_a_chan.channel_ready(
9335 &node_b_updates.channel_ready.unwrap(),
9343 node_a_chan.context.channel_state,
9344 ChannelState::FundingSent as u32 |
9345 ChannelState::WaitingForBatch as u32 |
9346 ChannelState::TheirChannelReady as u32,
9349 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9350 node_a_chan.set_batch_ready();
9352 node_a_chan.context.channel_state,
9353 ChannelState::FundingSent as u32 |
9354 ChannelState::TheirChannelReady as u32,
9356 assert!(node_a_chan.check_get_channel_ready(0).is_some());