1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script,Builder};
12 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
13 use bitcoin::util::sighash;
14 use bitcoin::consensus::encode;
16 use bitcoin::hashes::Hash;
17 use bitcoin::hashes::sha256::Hash as Sha256;
18 use bitcoin::hashes::sha256d::Hash as Sha256d;
19 use bitcoin::hash_types::{Txid, BlockHash};
21 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
22 use bitcoin::secp256k1::{PublicKey,SecretKey};
23 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
24 use bitcoin::secp256k1;
26 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
27 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
29 use crate::ln::msgs::DecodeError;
30 use crate::ln::script::{self, ShutdownScript};
31 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
32 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
33 use crate::ln::chan_utils;
34 use crate::ln::onion_utils::HTLCFailReason;
35 use crate::chain::BestBlock;
36 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
37 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
38 use crate::chain::transaction::{OutPoint, TransactionData};
39 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
40 use crate::events::ClosureReason;
41 use crate::routing::gossip::NodeId;
42 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
43 use crate::util::logger::Logger;
44 use crate::util::errors::APIError;
45 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
46 use crate::util::scid_utils::scid_from_parts;
49 use crate::prelude::*;
50 use core::{cmp,mem,fmt};
52 #[cfg(any(test, fuzzing, debug_assertions))]
53 use crate::sync::Mutex;
54 use bitcoin::hashes::hex::ToHex;
55 use crate::sign::type_resolver::ChannelSignerType;
58 pub struct ChannelValueStat {
59 pub value_to_self_msat: u64,
60 pub channel_value_msat: u64,
61 pub channel_reserve_msat: u64,
62 pub pending_outbound_htlcs_amount_msat: u64,
63 pub pending_inbound_htlcs_amount_msat: u64,
64 pub holding_cell_outbound_amount_msat: u64,
65 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
66 pub counterparty_dust_limit_msat: u64,
69 pub struct AvailableBalances {
70 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
71 pub balance_msat: u64,
72 /// Total amount available for our counterparty to send to us.
73 pub inbound_capacity_msat: u64,
74 /// Total amount available for us to send to our counterparty.
75 pub outbound_capacity_msat: u64,
76 /// The maximum value we can assign to the next outbound HTLC
77 pub next_outbound_htlc_limit_msat: u64,
78 /// The minimum value we can assign to the next outbound HTLC
79 pub next_outbound_htlc_minimum_msat: u64,
82 #[derive(Debug, Clone, Copy, PartialEq)]
84 // Inbound states mirroring InboundHTLCState
86 AwaitingRemoteRevokeToAnnounce,
87 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
88 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
89 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
90 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
91 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
93 // Outbound state can only be `LocalAnnounced` or `Committed`
97 enum InboundHTLCRemovalReason {
98 FailRelay(msgs::OnionErrorPacket),
99 FailMalformed(([u8; 32], u16)),
100 Fulfill(PaymentPreimage),
103 enum InboundHTLCState {
104 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
105 /// update_add_htlc message for this HTLC.
106 RemoteAnnounced(PendingHTLCStatus),
107 /// Included in a received commitment_signed message (implying we've
108 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
109 /// state (see the example below). We have not yet included this HTLC in a
110 /// commitment_signed message because we are waiting on the remote's
111 /// aforementioned state revocation. One reason this missing remote RAA
112 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
113 /// is because every time we create a new "state", i.e. every time we sign a
114 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
115 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
116 /// sent provided the per_commitment_point for our current commitment tx.
117 /// The other reason we should not send a commitment_signed without their RAA
118 /// is because their RAA serves to ACK our previous commitment_signed.
120 /// Here's an example of how an HTLC could come to be in this state:
121 /// remote --> update_add_htlc(prev_htlc) --> local
122 /// remote --> commitment_signed(prev_htlc) --> local
123 /// remote <-- revoke_and_ack <-- local
124 /// remote <-- commitment_signed(prev_htlc) <-- local
125 /// [note that here, the remote does not respond with a RAA]
126 /// remote --> update_add_htlc(this_htlc) --> local
127 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
128 /// Now `this_htlc` will be assigned this state. It's unable to be officially
129 /// accepted, i.e. included in a commitment_signed, because we're missing the
130 /// RAA that provides our next per_commitment_point. The per_commitment_point
131 /// is used to derive commitment keys, which are used to construct the
132 /// signatures in a commitment_signed message.
133 /// Implies AwaitingRemoteRevoke.
135 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
136 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
137 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
138 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
139 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
140 /// channel (before it can then get forwarded and/or removed).
141 /// Implies AwaitingRemoteRevoke.
142 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
144 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
145 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
147 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
148 /// commitment transaction without it as otherwise we'll have to force-close the channel to
149 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
150 /// anyway). That said, ChannelMonitor does this for us (see
151 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
152 /// our own local state before then, once we're sure that the next commitment_signed and
153 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
154 LocalRemoved(InboundHTLCRemovalReason),
157 struct InboundHTLCOutput {
161 payment_hash: PaymentHash,
162 state: InboundHTLCState,
165 enum OutboundHTLCState {
166 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
167 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
168 /// we will promote to Committed (note that they may not accept it until the next time we
169 /// revoke, but we don't really care about that:
170 /// * they've revoked, so worst case we can announce an old state and get our (option on)
171 /// money back (though we won't), and,
172 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
173 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
174 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
175 /// we'll never get out of sync).
176 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
177 /// OutboundHTLCOutput's size just for a temporary bit
178 LocalAnnounced(Box<msgs::OnionPacket>),
180 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
181 /// the change (though they'll need to revoke before we fail the payment).
182 RemoteRemoved(OutboundHTLCOutcome),
183 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
184 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
185 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
186 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
187 /// remote revoke_and_ack on a previous state before we can do so.
188 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
189 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
190 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
191 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
192 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
193 /// revoke_and_ack to drop completely.
194 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
198 enum OutboundHTLCOutcome {
199 /// LDK version 0.0.105+ will always fill in the preimage here.
200 Success(Option<PaymentPreimage>),
201 Failure(HTLCFailReason),
204 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
205 fn from(o: Option<HTLCFailReason>) -> Self {
207 None => OutboundHTLCOutcome::Success(None),
208 Some(r) => OutboundHTLCOutcome::Failure(r)
213 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
214 fn into(self) -> Option<&'a HTLCFailReason> {
216 OutboundHTLCOutcome::Success(_) => None,
217 OutboundHTLCOutcome::Failure(ref r) => Some(r)
222 struct OutboundHTLCOutput {
226 payment_hash: PaymentHash,
227 state: OutboundHTLCState,
229 skimmed_fee_msat: Option<u64>,
232 /// See AwaitingRemoteRevoke ChannelState for more info
233 enum HTLCUpdateAwaitingACK {
234 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
238 payment_hash: PaymentHash,
240 onion_routing_packet: msgs::OnionPacket,
241 // The extra fee we're skimming off the top of this HTLC.
242 skimmed_fee_msat: Option<u64>,
245 payment_preimage: PaymentPreimage,
250 err_packet: msgs::OnionErrorPacket,
254 /// There are a few "states" and then a number of flags which can be applied:
255 /// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
256 /// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
257 /// move on to `ChannelReady`.
258 /// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
259 /// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
260 /// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
262 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
263 OurInitSent = 1 << 0,
264 /// Implies we have received their `open_channel`/`accept_channel` message
265 TheirInitSent = 1 << 1,
266 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
267 /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
268 /// upon receipt of `funding_created`, so simply skip this state.
270 /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
271 /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
272 /// and our counterparty consider the funding transaction confirmed.
274 /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
275 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
276 TheirChannelReady = 1 << 4,
277 /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
278 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
279 OurChannelReady = 1 << 5,
281 /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
282 /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
284 PeerDisconnected = 1 << 7,
285 /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
286 /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
287 /// sending any outbound messages until they've managed to finish.
288 MonitorUpdateInProgress = 1 << 8,
289 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
290 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
291 /// messages as then we will be unable to determine which HTLCs they included in their
292 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
294 /// Flag is set on `ChannelReady`.
295 AwaitingRemoteRevoke = 1 << 9,
296 /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
297 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
298 /// to respond with our own shutdown message when possible.
299 RemoteShutdownSent = 1 << 10,
300 /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
301 /// point, we may not add any new HTLCs to the channel.
302 LocalShutdownSent = 1 << 11,
303 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
304 /// to drop us, but we store this anyway.
305 ShutdownComplete = 4096,
306 /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
307 /// broadcasting of the funding transaction is being held until all channels in the batch
308 /// have received funding_signed and have their monitors persisted.
309 WaitingForBatch = 1 << 13,
/// Mask covering both shutdown flags; a channel has this fully set once each side has sent its
/// `shutdown` message (see `LocalShutdownSent` / `RemoteShutdownSent`).
311 const BOTH_SIDES_SHUTDOWN_MASK: u32 =
312 ChannelState::LocalShutdownSent as u32 |
313 ChannelState::RemoteShutdownSent as u32;
/// Flags which may be set on top of several of the base channel states at once (shutdown,
/// peer-disconnected, monitor-update-pending); useful for masking them out when comparing
/// against a base state.
314 const MULTI_STATE_FLAGS: u32 =
315 BOTH_SIDES_SHUTDOWN_MASK |
316 ChannelState::PeerDisconnected as u32 |
317 ChannelState::MonitorUpdateInProgress as u32;
318 const STATE_FLAGS: u32 =
320 ChannelState::TheirChannelReady as u32 |
321 ChannelState::OurChannelReady as u32 |
322 ChannelState::AwaitingRemoteRevoke as u32 |
323 ChannelState::WaitingForBatch as u32;
325 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
327 pub const DEFAULT_MAX_HTLCS: u16 = 50;
329 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
330 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
331 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
332 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
// The weight each non-dust HTLC output adds to a commitment transaction.
// NOTE(review): the two identical definitions below differ only in visibility and are presumably
// gated by `#[cfg(...)]` attributes elided from this view — confirm, as two ungated definitions
// of the same name would not compile.
336 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
338 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
/// The value, in satoshis, of an anchor output.
340 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
342 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
343 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
344 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
345 /// `holder_max_htlc_value_in_flight_msat`.
346 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
348 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
349 /// `option_support_large_channel` (aka wumbo channels) is not supported.
351 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
353 /// Total bitcoin supply in satoshis.
/// (21 million BTC times 100,000,000 satoshis per BTC = 2.1 * 10^15 sats.)
354 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
356 /// The maximum network dust limit for standard script formats. This currently represents the
357 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
358 /// transaction non-standard and thus refuses to relay it.
359 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
360 /// implementations use this value for their dust limit today.
361 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
363 /// The maximum channel dust limit we will accept from our counterparty.
364 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
366 /// The dust limit is used for both the commitment transaction outputs as well as the closing
367 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
368 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
369 /// In order to avoid having to concern ourselves with standardness during the closing process, we
370 /// simply require our counterparty to use a dust limit which will leave any segwit output
372 /// See <https://github.com/lightning/bolts/issues/905> for more details.
373 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
375 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
/// The minimum channel reserve, in satoshis, that we will require our counterparty to maintain.
376 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
378 /// Used to return a simple Error back to ChannelManager. Will get converted to a
379 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
380 /// channel_id in ChannelManager.
381 pub(super) enum ChannelError {
387 impl fmt::Debug for ChannelError {
388 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
390 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
391 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
392 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
397 impl fmt::Display for ChannelError {
398 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
400 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
401 &ChannelError::Warn(ref e) => write!(f, "{}", e),
402 &ChannelError::Close(ref e) => write!(f, "{}", e),
407 macro_rules! secp_check {
408 ($res: expr, $err: expr) => {
411 Err(_) => return Err(ChannelError::Close($err)),
416 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
417 /// our counterparty or not. However, we don't want to announce updates right away to avoid
418 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
419 /// our channel_update message and track the current state here.
420 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
421 #[derive(Clone, Copy, PartialEq)]
422 pub(super) enum ChannelUpdateStatus {
423 /// We've announced the channel as enabled and are connected to our peer.
425 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
427 /// Our channel is live again, but we haven't announced the channel as enabled yet.
429 /// We've announced the channel as disabled.
433 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
435 pub enum AnnouncementSigsState {
436 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
437 /// we sent the last `AnnouncementSignatures`.
439 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
440 /// This state never appears on disk - instead we write `NotSent`.
442 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
443 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
444 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
445 /// they send back a `RevokeAndACK`.
446 /// This state never appears on disk - instead we write `NotSent`.
448 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
449 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
453 /// An enum indicating whether the local or remote side offered a given HTLC.
459 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
462 pending_htlcs_value_msat: u64,
463 on_counterparty_tx_dust_exposure_msat: u64,
464 on_holder_tx_dust_exposure_msat: u64,
465 holding_cell_msat: u64,
466 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
469 /// An enum gathering stats on commitment transaction, either local or remote.
470 struct CommitmentStats<'a> {
471 tx: CommitmentTransaction, // the transaction info
472 feerate_per_kw: u32, // the feerate included to build the transaction
473 total_fee_sat: u64, // the total fee included in the transaction
474 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
475 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
476 local_balance_msat: u64, // local balance before fees but considering dust limits
477 remote_balance_msat: u64, // remote balance before fees but considering dust limits
478 preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
481 /// Used when calculating whether we or the remote can afford an additional HTLC.
482 struct HTLCCandidate {
484 origin: HTLCInitiator,
488 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
496 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
498 enum UpdateFulfillFetch {
500 monitor_update: ChannelMonitorUpdate,
501 htlc_value_msat: u64,
502 msg: Option<msgs::UpdateFulfillHTLC>,
507 /// The return type of get_update_fulfill_htlc_and_commit.
508 pub enum UpdateFulfillCommitFetch {
509 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
510 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
511 /// previously placed in the holding cell (and has since been removed).
513 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
514 monitor_update: ChannelMonitorUpdate,
515 /// The value of the HTLC which was claimed, in msat.
516 htlc_value_msat: u64,
518 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
519 /// or has been forgotten (presumably previously claimed).
523 /// The return value of `monitor_updating_restored`
524 pub(super) struct MonitorRestoreUpdates {
525 pub raa: Option<msgs::RevokeAndACK>,
526 pub commitment_update: Option<msgs::CommitmentUpdate>,
527 pub order: RAACommitmentOrder,
528 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
529 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
530 pub finalized_claimed_htlcs: Vec<HTLCSource>,
531 pub funding_broadcastable: Option<Transaction>,
532 pub channel_ready: Option<msgs::ChannelReady>,
533 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
536 /// The return value of `channel_reestablish`
537 pub(super) struct ReestablishResponses {
538 pub channel_ready: Option<msgs::ChannelReady>,
539 pub raa: Option<msgs::RevokeAndACK>,
540 pub commitment_update: Option<msgs::CommitmentUpdate>,
541 pub order: RAACommitmentOrder,
542 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
543 pub shutdown_msg: Option<msgs::Shutdown>,
546 /// The result of a shutdown that should be handled.
548 pub(crate) struct ShutdownResult {
549 /// A channel monitor update to apply.
550 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
551 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
552 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
553 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
554 /// propagated to the remainder of the batch.
555 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
558 /// If the majority of the channels funds are to the fundee and the initiator holds only just
559 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
560 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
561 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
562 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
563 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
564 /// by this multiple without hitting this case, before sending.
565 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
566 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
567 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
568 /// leave the channel less usable as we hold a bigger reserve.
// The constant is defined twice under complementary `cfg` gates solely so that it is `pub`
// (visible to test/fuzzing harnesses) in test builds while remaining private otherwise; the
// value is identical in both builds.
569 #[cfg(any(fuzzing, test))]
570 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
571 #[cfg(not(any(fuzzing, test)))]
572 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
574 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
575 /// channel creation on an inbound channel, we simply force-close and move on.
576 /// This constant is the one suggested in BOLT 2.
/// (2016 blocks is roughly two weeks at one block per ten minutes.)
577 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
579 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
580 /// not have enough balance value remaining to cover the onchain cost of this new
581 /// HTLC weight. If this happens, our counterparty fails the reception of our
582 /// commitment_signed including this new HTLC due to infringement on the channel
584 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
585 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
586 /// leads to a channel force-close. Ultimately, this is an issue coming from the
587 /// design of LN state machines, allowing asynchronous updates.
588 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
590 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
591 /// commitment transaction fees, with at least this many HTLCs present on the commitment
592 /// transaction (not counting the value of the HTLCs themselves).
593 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
595 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
596 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
597 /// ChannelUpdate prompted by the config update. This value was determined as follows:
599 /// * The expected interval between ticks (1 minute).
600 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
601 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
602 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
603 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
605 /// The number of ticks that may elapse while we're waiting for a response to a
606 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
609 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
610 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
612 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
613 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
614 /// exceeding this age limit will be force-closed and purged from memory.
615 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
617 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
/// (Fixed at 100 by Bitcoin's coinbase-maturity consensus rule.)
618 pub(crate) const COINBASE_MATURITY: u32 = 100;
620 struct PendingChannelMonitorUpdate {
621 update: ChannelMonitorUpdate,
624 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
625 (0, update, required),
628 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
629 /// its variants containing an appropriate channel struct.
630 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
631 UnfundedOutboundV1(OutboundV1Channel<SP>),
632 UnfundedInboundV1(InboundV1Channel<SP>),
636 impl<'a, SP: Deref> ChannelPhase<SP> where
637 SP::Target: SignerProvider,
638 <SP::Target as SignerProvider>::Signer: ChannelSigner,
640 pub fn context(&'a self) -> &'a ChannelContext<SP> {
642 ChannelPhase::Funded(chan) => &chan.context,
643 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
644 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
648 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
650 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
651 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
652 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
657 /// Contains all state common to unfunded inbound/outbound channels.
658 pub(super) struct UnfundedChannelContext {
659 /// A counter tracking how many ticks have elapsed since this unfunded channel was
660 /// created. If the peer has yet to respond by the time this counter reaches
661 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
663 /// This is so that we don't keep channels around that haven't progressed to a funded state
664 /// in a timely manner.
665 unfunded_channel_age_ticks: usize,
668 impl UnfundedChannelContext {
669 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
670 /// having reached the unfunded channel age limit.
672 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
/// Note: this method mutates state — every call increments `unfunded_channel_age_ticks` before
/// comparing it to the limit, so it must be invoked exactly once per timer tick.
673 pub fn should_expire_unfunded_channel(&mut self) -> bool {
674 self.unfunded_channel_age_ticks += 1;
675 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
/// Contains everything about the channel including state, and various flags.
// NOTE(review): several lines of this struct appear to have been lost from this excerpt —
// at minimum the `#[cfg(test)]`/`#[cfg(not(test))]` attributes gating the duplicated
// test-visibility fields below, plus fields such as `user_id`, `channel_state` and
// `feerate_per_kw` which the accessor methods further down read. Confirm against upstream
// before building.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.
	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	// NOTE(review): upstream gates this pub(crate)/private pair behind
	// #[cfg(test)] / #[cfg(not(test))]; the attributes appear to have been lost from this
	// excerpt — as written the duplicate field will not compile. TODO: confirm.
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	closing_fee_limits: Option<(u64, u64)>,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	// NOTE(review): see cfg note above — same elided #[cfg(test)]/#[cfg(not(test))] gating
	// presumably applies to each of the duplicated pairs below.
	pub(super) holder_dust_limit_satoshis: u64,
	holder_dust_limit_satoshis: u64,

	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	counterparty_max_htlc_value_in_flight_msat: u64,

	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	pub(super) holder_selected_channel_reserve_satoshis: u64,
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	pub counterparty_max_accepted_htlcs: u16,
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
	// NOTE(review): the struct's closing brace was elided from this excerpt.
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	/// Returns the id of the latest monitor update issued for this channel.
	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	/// Returns whether this channel was configured to be publicly announced.
	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	/// Returns whether we initiated this channel (per the channel transaction parameters).
	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		// Anything past OurInitSent implies the peer has sent us at least one message.
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}
987 /// shutdown state returns the state of the channel in its various stages of shutdown
988 pub fn shutdown_state(&self) -> ChannelShutdownState {
989 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
990 return ChannelShutdownState::ShutdownComplete;
992 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
993 return ChannelShutdownState::ShutdownInitiated;
995 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
996 return ChannelShutdownState::ResolvingHTLCs;
998 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
999 return ChannelShutdownState::NegotiatingClosingFee;
1001 return ChannelShutdownState::NotShuttingDown;
1004 fn closing_negotiation_ready(&self) -> bool {
1005 self.pending_inbound_htlcs.is_empty() &&
1006 self.pending_outbound_htlcs.is_empty() &&
1007 self.pending_update_fee.is_none() &&
1008 self.channel_state &
1009 (BOTH_SIDES_SHUTDOWN_MASK |
1010 ChannelState::AwaitingRemoteRevoke as u32 |
1011 ChannelState::PeerDisconnected as u32 |
1012 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1015 /// Returns true if this channel is currently available for use. This is a superset of
1016 /// is_usable() and considers things like the channel being temporarily disabled.
1017 /// Allowed in any state (including after shutdown)
1018 pub fn is_live(&self) -> bool {
1019 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1022 // Public utilities:
1024 pub fn channel_id(&self) -> ChannelId {
1028 // Return the `temporary_channel_id` used during channel establishment.
1030 // Will return `None` for channels created prior to LDK version 0.0.115.
1031 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1032 self.temporary_channel_id
1035 pub fn minimum_depth(&self) -> Option<u32> {
1039 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1040 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1041 pub fn get_user_id(&self) -> u128 {
1045 /// Gets the channel's type
1046 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1050 /// Gets the channel's `short_channel_id`.
1052 /// Will return `None` if the channel hasn't been confirmed yet.
1053 pub fn get_short_channel_id(&self) -> Option<u64> {
1054 self.short_channel_id
1057 /// Allowed in any state (including after shutdown)
1058 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1059 self.latest_inbound_scid_alias
1062 /// Allowed in any state (including after shutdown)
1063 pub fn outbound_scid_alias(&self) -> u64 {
1064 self.outbound_scid_alias
1067 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1068 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1069 /// or prior to any channel actions during `Channel` initialization.
1070 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1071 debug_assert_eq!(self.outbound_scid_alias, 0);
1072 self.outbound_scid_alias = outbound_scid_alias;
1075 /// Returns the funding_txo we either got from our peer, or were given by
1076 /// get_funding_created.
1077 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1078 self.channel_transaction_parameters.funding_outpoint
1081 /// Returns the block hash in which our funding transaction was confirmed.
1082 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1083 self.funding_tx_confirmed_in
1086 /// Returns the current number of confirmations on the funding transaction.
1087 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1088 if self.funding_tx_confirmation_height == 0 {
1089 // We either haven't seen any confirmation yet, or observed a reorg.
1093 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1096 fn get_holder_selected_contest_delay(&self) -> u16 {
1097 self.channel_transaction_parameters.holder_selected_contest_delay
1100 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1101 &self.channel_transaction_parameters.holder_pubkeys
1104 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1105 self.channel_transaction_parameters.counterparty_parameters
1106 .as_ref().map(|params| params.selected_contest_delay)
1109 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1110 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1113 /// Allowed in any state (including after shutdown)
1114 pub fn get_counterparty_node_id(&self) -> PublicKey {
1115 self.counterparty_node_id
1118 /// Allowed in any state (including after shutdown)
1119 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1120 self.holder_htlc_minimum_msat
1123 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1124 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1125 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1128 /// Allowed in any state (including after shutdown)
1129 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1131 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1132 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1133 // channel might have been used to route very small values (either by honest users or as DoS).
1134 self.channel_value_satoshis * 1000 * 9 / 10,
1136 self.counterparty_max_htlc_value_in_flight_msat
1140 /// Allowed in any state (including after shutdown)
1141 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1142 self.counterparty_htlc_minimum_msat
1145 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1146 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1147 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1150 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1151 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1152 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1154 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1155 party_max_htlc_value_in_flight_msat
1160 pub fn get_value_satoshis(&self) -> u64 {
1161 self.channel_value_satoshis
1164 pub fn get_fee_proportional_millionths(&self) -> u32 {
1165 self.config.options.forwarding_fee_proportional_millionths
1168 pub fn get_cltv_expiry_delta(&self) -> u16 {
1169 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1172 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1173 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1174 where F::Target: FeeEstimator
1176 match self.config.options.max_dust_htlc_exposure {
1177 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1178 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1179 ConfirmationTarget::OnChainSweep);
1180 feerate_per_kw as u64 * multiplier
1182 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1186 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1187 pub fn prev_config(&self) -> Option<ChannelConfig> {
1188 self.prev_config.map(|prev_config| prev_config.0)
1191 // Checks whether we should emit a `ChannelPending` event.
1192 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1193 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1196 // Returns whether we already emitted a `ChannelPending` event.
1197 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1198 self.channel_pending_event_emitted
1201 // Remembers that we already emitted a `ChannelPending` event.
1202 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1203 self.channel_pending_event_emitted = true;
1206 // Checks whether we should emit a `ChannelReady` event.
1207 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1208 self.is_usable() && !self.channel_ready_event_emitted
1211 // Remembers that we already emitted a `ChannelReady` event.
1212 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1213 self.channel_ready_event_emitted = true;
1216 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1217 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1218 /// no longer be considered when forwarding HTLCs.
1219 pub fn maybe_expire_prev_config(&mut self) {
1220 if self.prev_config.is_none() {
1223 let prev_config = self.prev_config.as_mut().unwrap();
1225 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1226 self.prev_config = None;
1230 /// Returns the current [`ChannelConfig`] applied to the channel.
1231 pub fn config(&self) -> ChannelConfig {
1235 /// Updates the channel's config. A bool is returned indicating whether the config update
1236 /// applied resulted in a new ChannelUpdate message.
1237 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1238 let did_channel_update =
1239 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1240 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1241 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1242 if did_channel_update {
1243 self.prev_config = Some((self.config.options, 0));
1244 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1245 // policy change to propagate throughout the network.
1246 self.update_time_counter += 1;
1248 self.config.options = *config;
1252 /// Returns true if funding_signed was sent/received and the
1253 /// funding transaction has been broadcast if necessary.
1254 pub fn is_funding_broadcast(&self) -> bool {
1255 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1256 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1259 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1260 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1261 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1262 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1263 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1265 /// @local is used only to convert relevant internal structures which refer to remote vs local
1266 /// to decide value of outputs and direction of HTLCs.
1267 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1268 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1269 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1270 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1271 /// which peer generated this transaction and "to whom" this transaction flows.
1273 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1274 where L::Target: Logger
1276 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1277 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1278 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1280 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1281 let mut remote_htlc_total_msat = 0;
1282 let mut local_htlc_total_msat = 0;
1283 let mut value_to_self_msat_offset = 0;
1285 let mut feerate_per_kw = self.feerate_per_kw;
1286 if let Some((feerate, update_state)) = self.pending_update_fee {
1287 if match update_state {
1288 // Note that these match the inclusion criteria when scanning
1289 // pending_inbound_htlcs below.
1290 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1291 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1292 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1294 feerate_per_kw = feerate;
1298 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1299 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1300 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1302 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1304 macro_rules! get_htlc_in_commitment {
1305 ($htlc: expr, $offered: expr) => {
1306 HTLCOutputInCommitment {
1308 amount_msat: $htlc.amount_msat,
1309 cltv_expiry: $htlc.cltv_expiry,
1310 payment_hash: $htlc.payment_hash,
1311 transaction_output_index: None
1316 macro_rules! add_htlc_output {
1317 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1318 if $outbound == local { // "offered HTLC output"
1319 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1320 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1323 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1325 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1326 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1327 included_non_dust_htlcs.push((htlc_in_tx, $source));
1329 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1330 included_dust_htlcs.push((htlc_in_tx, $source));
1333 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1334 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1337 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1339 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1340 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1341 included_non_dust_htlcs.push((htlc_in_tx, $source));
1343 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1344 included_dust_htlcs.push((htlc_in_tx, $source));
1350 for ref htlc in self.pending_inbound_htlcs.iter() {
1351 let (include, state_name) = match htlc.state {
1352 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1353 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1354 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1355 InboundHTLCState::Committed => (true, "Committed"),
1356 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1360 add_htlc_output!(htlc, false, None, state_name);
1361 remote_htlc_total_msat += htlc.amount_msat;
1363 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1365 &InboundHTLCState::LocalRemoved(ref reason) => {
1366 if generated_by_local {
1367 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1368 value_to_self_msat_offset += htlc.amount_msat as i64;
1377 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1379 for ref htlc in self.pending_outbound_htlcs.iter() {
1380 let (include, state_name) = match htlc.state {
1381 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1382 OutboundHTLCState::Committed => (true, "Committed"),
1383 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1384 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1385 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1388 let preimage_opt = match htlc.state {
1389 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1390 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1391 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1395 if let Some(preimage) = preimage_opt {
1396 preimages.push(preimage);
1400 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1401 local_htlc_total_msat += htlc.amount_msat;
1403 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1405 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1406 value_to_self_msat_offset -= htlc.amount_msat as i64;
1408 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1409 if !generated_by_local {
1410 value_to_self_msat_offset -= htlc.amount_msat as i64;
1418 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1419 assert!(value_to_self_msat >= 0);
1420 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1421 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1422 // "violate" their reserve value by counting those against it. Thus, we have to convert
1423 // everything to i64 before subtracting as otherwise we can overflow.
1424 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1425 assert!(value_to_remote_msat >= 0);
1427 #[cfg(debug_assertions)]
1429 // Make sure that the to_self/to_remote is always either past the appropriate
1430 // channel_reserve *or* it is making progress towards it.
1431 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1432 self.holder_max_commitment_tx_output.lock().unwrap()
1434 self.counterparty_max_commitment_tx_output.lock().unwrap()
1436 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1437 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1438 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1439 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1442 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1443 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1444 let (value_to_self, value_to_remote) = if self.is_outbound() {
1445 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1447 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1450 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1451 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1452 let (funding_pubkey_a, funding_pubkey_b) = if local {
1453 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1455 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1458 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1459 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1464 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1465 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1470 let num_nondust_htlcs = included_non_dust_htlcs.len();
1472 let channel_parameters =
1473 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1474 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1475 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1482 &mut included_non_dust_htlcs,
1485 let mut htlcs_included = included_non_dust_htlcs;
1486 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1487 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1488 htlcs_included.append(&mut included_dust_htlcs);
1490 // For the stats, trim the value in msats to 0 accordingly
1491 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1492 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1500 local_balance_msat: value_to_self_msat as u64,
1501 remote_balance_msat: value_to_remote_msat as u64,
1507 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1508 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1509 /// our counterparty!)
1510 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1511 /// TODO: Consider expressing the holder/counterparty key-set distinction in the type system so it can be checked at compile time.
1512 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
// Our per-commitment point for this commitment number comes from our signer.
1513 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1514 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1515 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1516 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// Note the asymmetry in which side's basepoints are used: our delayed-payment and HTLC
// basepoints plus *their* revocation basepoint — i.e. keys for a commitment tx that *we*
// would broadcast (and that they can revoke).
1518 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1522 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1523 /// will sign and send to our counterparty.
1524 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1525 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1526 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1527 //may see payments to it!
1528 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1529 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1530 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// Mirror image of build_holder_transaction_keys: their current per-commitment point and
// delayed-payment basepoint, plus *our* revocation basepoint.
// NOTE(review): the unwrap assumes `counterparty_cur_commitment_point` is Some by the time
// this is called (presumably set during the handshake) — panics otherwise; confirm callers.
1532 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1535 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1536 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1537 /// Panics if called before accept_channel/InboundV1Channel::new
1538 pub fn get_funding_redeemscript(&self) -> Script {
// 2-of-2 multisig over the holder and counterparty funding pubkeys.
1539 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
/// Returns a reference to the counterparty's funding pubkey (used in the 2-of-2 funding script).
1542 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1543 &self.get_counterparty_pubkeys().funding_pubkey
/// Returns the feerate currently in use for the channel's commitment transactions, in
/// satoshis per 1000 weight units.
1546 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
/// Returns a conservatively-buffered feerate for evaluating dust-HTLC exposure: the highest
/// of the current feerate, any pending/proposed update, bumped per the policy described below.
1550 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1551 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1552 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1553 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1554 // more dust balance if the feerate increases when we have several HTLCs pending
1555 // which are near the dust limit.
1556 let mut feerate_per_kw = self.feerate_per_kw;
1557 // If there's a pending update fee, use it to ensure we aren't under-estimating
1558 // potential feerate updates coming soon.
1559 if let Some((feerate, _)) = self.pending_update_fee {
1560 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// Also consider a feerate update the caller is about to propose outbound.
1562 if let Some(feerate) = outbound_feerate_update {
1563 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// 1250/1000 is the +25% bump. NOTE(review): 2530 here is a *floor* on the result, not an
// additive "+2530" bump as the comment above might suggest — confirm that's the intent.
1565 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1568 /// Get forwarding information for the counterparty.
1569 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
// Cloned so the caller gets an owned copy; None if we don't have their forwarding info.
1570 self.counterparty_forwarding_info.clone()
1573 /// Returns a HTLCStats about inbound pending htlcs
1574 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1576 let mut stats = HTLCStats {
1577 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1578 pending_htlcs_value_msat: 0,
1579 on_counterparty_tx_dust_exposure_msat: 0,
1580 on_holder_tx_dust_exposure_msat: 0,
1581 holding_cell_msat: 0,
1582 on_holder_tx_holding_cell_htlcs_count: 0,
// For non-anchor channels the effective dust limit is padded by the second-stage HTLC
// claim-tx fee at the buffered feerate (HTLC outputs below that aren't worth claiming).
1585 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1588 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1589 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1590 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1592 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1593 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1594 for ref htlc in context.pending_inbound_htlcs.iter() {
1595 stats.pending_htlcs_value_msat += htlc.amount_msat;
// An inbound HTLC is claimed via timeout on the counterparty's commitment tx and via
// success (preimage) on ours, hence the different dust limit on each side.
1596 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1597 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1599 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1600 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1606 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1607 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1609 let mut stats = HTLCStats {
1610 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1611 pending_htlcs_value_msat: 0,
1612 on_counterparty_tx_dust_exposure_msat: 0,
1613 on_holder_tx_dust_exposure_msat: 0,
1614 holding_cell_msat: 0,
1615 on_holder_tx_holding_cell_htlcs_count: 0,
// For non-anchor channels the effective dust limit is padded by the second-stage HTLC
// claim-tx fee at the buffered feerate.
1618 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1621 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1622 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1623 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Outbound HTLCs are the mirror of inbound: claimed via success on the counterparty's tx
// and via timeout on ours.
1625 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1626 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1627 for ref htlc in context.pending_outbound_htlcs.iter() {
1628 stats.pending_htlcs_value_msat += htlc.amount_msat;
1629 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1630 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1632 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1633 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// Holding-cell adds aren't yet in pending_outbound_htlcs but will be, so count them too.
1637 for update in context.holding_cell_htlc_updates.iter() {
1638 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1639 stats.pending_htlcs += 1;
1640 stats.pending_htlcs_value_msat += amount_msat;
1641 stats.holding_cell_msat += amount_msat;
1642 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1643 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1645 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1646 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1648 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1655 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1656 /// Doesn't bother handling the
1657 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1658 /// corner case properly.
1659 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1660 -> AvailableBalances
1661 where F::Target: FeeEstimator
1663 let context = &self;
1664 // Note that we have to handle overflow due to the above case.
1665 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1666 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
// Our balance: what's ours now, plus inbound HTLCs we've already claimed (awaiting their
// RAA), minus everything we currently have in flight outbound.
1668 let mut balance_msat = context.value_to_self_msat;
1669 for ref htlc in context.pending_inbound_htlcs.iter() {
1670 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1671 balance_msat += htlc.amount_msat;
1674 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1676 let outbound_capacity_msat = context.value_to_self_msat
1677 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1679 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1681 let mut available_capacity_msat = outbound_capacity_msat;
// Anchor channels carry two fixed-value anchor outputs the fee-payer must fund.
1683 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1684 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1688 if context.is_outbound() {
1689 // We should mind channel commit tx fee when computing how much of the available capacity
1690 // can be used in the next htlc. Mirrors the logic in send_htlc.
1692 // The fee depends on whether the amount we will be sending is above dust or not,
1693 // and the answer will in turn change the amount itself — making it a circular
1695 // This complicates the computation around dust-values, up to the one-htlc-value.
1696 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1697 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1698 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
// Compute the commit-tx fee in both the above-dust and below-dust cases; the Some(())
// requests an extra fee-spike-buffer HTLC in the estimate.
1701 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1702 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1703 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1704 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1705 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1706 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1707 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1710 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1711 // value ends up being below dust, we have this fee available again. In that case,
1712 // match the value to right-below-dust.
1713 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1714 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1715 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1716 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1717 debug_assert!(one_htlc_difference_msat != 0);
1718 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1719 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1720 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1722 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1725 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1726 // sending a new HTLC won't reduce their balance below our reserve threshold.
1727 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1728 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1729 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1732 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1733 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1735 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1736 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1737 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1739 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1740 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1741 // we've selected for them, we can only send dust HTLCs.
1742 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1746 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1748 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1749 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1750 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1751 // send above the dust limit (as the router can always overpay to meet the dust limit).
1752 let mut remaining_msat_below_dust_exposure_limit = None;
1753 let mut dust_exposure_dust_limit_msat = 0;
1754 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1756 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1757 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1759 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1760 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1761 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Would one more just-below-dust HTLC push us over the exposure cap on either tx?
1763 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1764 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
1765 remaining_msat_below_dust_exposure_limit =
1766 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1767 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1770 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1771 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
1772 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1773 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1774 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1775 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1778 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1779 if available_capacity_msat < dust_exposure_dust_limit_msat {
1780 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1782 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Finally, cap by the counterparty's in-flight value and HTLC-count limits.
1786 available_capacity_msat = cmp::min(available_capacity_msat,
1787 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1789 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1790 available_capacity_msat = 0;
1794 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1795 - context.value_to_self_msat as i64
1796 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1797 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1799 outbound_capacity_msat,
1800 next_outbound_htlc_limit_msat: available_capacity_msat,
1801 next_outbound_htlc_minimum_msat,
/// Returns the reserve we require the counterparty to maintain, and the reserve they require
/// of us (None if we don't yet know their selection).
1806 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1807 let context = &self;
1808 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1811 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1812 /// number of pending HTLCs that are on track to be in our next commitment tx.
1814 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1815 /// `fee_spike_buffer_htlc` is `Some`.
1817 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1818 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1820 /// Dust HTLCs are excluded.
1821 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1822 let context = &self;
// Only the funder pays commitment fees, so this only makes sense on outbound channels.
1823 assert!(context.is_outbound());
// Non-anchor channels pad the dust limit by the second-stage claim-tx fee at the current feerate.
1825 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1828 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1829 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1831 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1832 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1834 let mut addl_htlcs = 0;
1835 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
// The candidate HTLC only adds to the count if non-dust; its direction determines whether
// the timeout (we offered) or success (they offered) dust limit applies on our tx.
1837 HTLCInitiator::LocalOffered => {
1838 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1842 HTLCInitiator::RemoteOffered => {
1843 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1849 let mut included_htlcs = 0;
1850 for ref htlc in context.pending_inbound_htlcs.iter() {
1851 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1854 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1855 // transaction including this HTLC if it times out before they RAA.
1856 included_htlcs += 1;
1859 for ref htlc in context.pending_outbound_htlcs.iter() {
1860 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1864 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1865 OutboundHTLCState::Committed => included_htlcs += 1,
1866 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1867 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1868 // transaction won't be generated until they send us their next RAA, which will mean
1869 // dropping any HTLCs in this state.
1874 for htlc in context.holding_cell_htlc_updates.iter() {
1876 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1877 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1882 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1883 // ack we're guaranteed to never include them in commitment txs anymore.
1887 let num_htlcs = included_htlcs + addl_htlcs;
1888 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzzing only: cache the fee computation's inputs so later checks can cross-verify it.
1889 #[cfg(any(test, fuzzing))]
// Record the fee without the spike-buffer HTLC so test comparisons match the real tx.
1892 if fee_spike_buffer_htlc.is_some() {
1893 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1895 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1896 + context.holding_cell_htlc_updates.len();
1897 let commitment_tx_info = CommitmentTxInfoCached {
1899 total_pending_htlcs,
1900 next_holder_htlc_id: match htlc.origin {
1901 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1902 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1904 next_counterparty_htlc_id: match htlc.origin {
1905 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1906 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1908 feerate: context.feerate_per_kw,
1910 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1915 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1916 /// pending HTLCs that are on track to be in their next commitment tx
1918 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1919 /// `fee_spike_buffer_htlc` is `Some`.
1921 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1922 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1924 /// Dust HTLCs are excluded.
1925 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1926 let context = &self;
// The remote pays the fee only when they funded the channel, i.e. the channel is inbound to us.
1927 assert!(!context.is_outbound());
// Non-anchor channels pad the dust limit by the second-stage claim-tx fee at the current feerate.
1929 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1932 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1933 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1935 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1936 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1938 let mut addl_htlcs = 0;
1939 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
// On *their* tx the roles flip: an HTLC we offer is success-claimed by them, one they offer
// is timeout-claimed; the candidate counts only if non-dust under the matching limit.
1941 HTLCInitiator::LocalOffered => {
1942 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1946 HTLCInitiator::RemoteOffered => {
1947 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1953 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
1954 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
1955 // committed outbound HTLCs, see below.
1956 let mut included_htlcs = 0;
1957 for ref htlc in context.pending_inbound_htlcs.iter() {
// NOTE(review): uses `<=` here where next_local_commit_tx_fee_msat uses `<` — confirm the
// boundary asymmetry between the two functions is intentional.
1958 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
1961 included_htlcs += 1;
1964 for ref htlc in context.pending_outbound_htlcs.iter() {
1965 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
1968 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
1969 // i.e. if they've responded to us with an RAA after announcement.
1971 OutboundHTLCState::Committed => included_htlcs += 1,
1972 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1973 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
1978 let num_htlcs = included_htlcs + addl_htlcs;
1979 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzzing only: cache the fee computation's inputs so later checks can cross-verify it.
1980 #[cfg(any(test, fuzzing))]
1983 if fee_spike_buffer_htlc.is_some() {
1984 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1986 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
1987 let commitment_tx_info = CommitmentTxInfoCached {
1989 total_pending_htlcs,
1990 next_holder_htlc_id: match htlc.origin {
1991 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1992 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1994 next_counterparty_htlc_id: match htlc.origin {
1995 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1996 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1998 feerate: context.feerate_per_kw,
2000 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
/// Runs `f` and returns its result only while the funding transaction has not yet been
/// broadcast, i.e. while channel_state has FundingCreated or WaitingForBatch set.
2005 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2006 where F: Fn() -> Option<O> {
2007 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2008 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2015 /// Returns the transaction if there is a pending funding transaction that is yet to be
2017 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
// Clones the cached funding transaction; None once broadcast (or if never set).
2018 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2021 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2023 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2024 self.if_unbroadcasted_funding(||
// The txid comes from the funding outpoint in the channel transaction parameters.
2025 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2029 /// Returns whether the channel is funded in a batch.
2030 pub fn is_batch_funding(&self) -> bool {
// `is_batch_funding` is an Option used as a flag: Some(_) marks batch funding.
2031 self.is_batch_funding.is_some()
2034 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2036 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
// Same as unbroadcasted_funding_txid, filtered to batch-funded channels only.
2037 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2040 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2041 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2042 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2043 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2044 /// immediately (others we will have to allow to time out).
2045 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2046 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2047 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2048 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2049 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
// Calling this twice is a bug: the channel must not already be shut down.
2050 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2052 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2053 // return them to fail the payment.
2054 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2055 let counterparty_node_id = self.get_counterparty_node_id();
2056 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2058 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2059 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2064 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2065 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2066 // returning a channel monitor update here would imply a channel monitor update before
2067 // we even registered the channel monitor to begin with, which is invalid.
2068 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2069 // funding transaction, don't return a funding txo (which prevents providing the
2070 // monitor update to the user, even if we return one).
2071 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2072 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
// CLOSED_CHANNEL_UPDATE_ID marks this as the terminal monitor update for the channel.
2073 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2074 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2075 update_id: self.latest_monitor_update_id,
2076 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// Capture the batch funding txid (if any) before flipping to the terminal state.
2080 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2082 self.channel_state = ChannelState::ShutdownComplete as u32;
2083 self.update_time_counter += 1;
2086 dropped_outbound_htlcs,
2087 unbroadcasted_batch_funding_txid,
2092 // Internal utility functions for channels
2094 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2095 /// `channel_value_satoshis` in msat, set through
2096 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2098 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2100 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2101 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2102 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2104 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2107 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2109 channel_value_satoshis * 10 * configured_percent
2112 /// Returns a minimum channel reserve value the remote needs to maintain,
2113 /// required by us according to the configured or default
2114 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2116 /// Guaranteed to return a value no larger than channel_value_satoshis
2118 /// This is used both for outbound and inbound channels and has lower bound
2119 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2120 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2121 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2122 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// Legacy default: 1% of the channel value, floored at 1000 sats, but never more than
	// the full channel value. (u64 division by a nonzero constant cannot overflow, so the
	// overflow flag from overflowing_div is ignored.)
	let (q, _) = channel_value_satoshis.overflowing_div(100);
	cmp::min(channel_value_satoshis, cmp::max(q, 1000))
}
2134 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2135 // Note that num_htlcs should not include dust HTLCs.
2137 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2138 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2141 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2142 // Note that num_htlcs should not include dust HTLCs.
2143 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2144 // Note that we need to divide before multiplying to round properly,
2145 // since the lowest denomination of bitcoin on-chain is the satoshi.
2146 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2149 // Holder designates channel data owned for the benefit of the user client.
2150 // Counterparty designates channel data owned by the another channel participant entity.
2151 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2152 pub context: ChannelContext<SP>,
// Cached parameters of a previously-built commitment transaction, used (in test/fuzzing
// builds only) to assert that rebuilding with identical inputs yields an identical fee.
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Total commitment tx fee, in sats.
	fee: u64,
	// Number of non-dust HTLCs included in the cached commitment tx.
	total_pending_htlcs: usize,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	// Feerate (per kiloweight) the cached fee was computed at.
	feerate: u32,
}
2164 impl<SP: Deref> Channel<SP> where
2165 SP::Target: SignerProvider,
2166 <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
/// Validates a counterparty-provided `feerate_per_kw` against our fee estimator's bounds,
/// returning `ChannelError::Close` when it is unacceptably high (non-anchor channels only)
/// or unacceptably low.
2168 fn check_remote_fee<F: Deref, L: Deref>(
2169 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2170 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2171 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2173 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2174 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2175 // We generally don't care too much if they set the feerate to something very high, but it
2176 // could result in the channel being useless due to everything being dust. This doesn't
2177 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2178 // zero fee, so their fee is no longer considered to determine dust limits.
2179 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
// NOTE(review): extraction gap — the original line 2180 (the `let upper_limit =` binding)
// is missing from this excerpt; line 2181 is its continuation.
2181 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
2182 if feerate_per_kw as u64 > upper_limit {
2183 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
// Anchor channels tolerate a lower remote feerate than non-anchor channels, so the lower
// bound's confirmation target depends on the channel type.
2187 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2188 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2190 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2192 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2193 if feerate_per_kw < lower_limit {
// A below-bound feerate is still accepted (with a log) when it improves on the feerate we
// currently have, since rejecting it could leave us stuck with an even worse one.
2194 if let Some(cur_feerate) = cur_feerate_per_kw {
2195 if feerate_per_kw > cur_feerate {
2197 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2198 cur_feerate, feerate_per_kw);
2202 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2208 fn get_closing_scriptpubkey(&self) -> Script {
2209 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2210 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2211 // outside of those situations will fail.
2212 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
/// Estimates the weight of the co-operative closing transaction, including whichever of the
/// holder (`a_scriptpubkey`) and counterparty (`b_scriptpubkey`) outputs are present, for use
/// in closing-fee computation.
2216 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
// NOTE(review): extraction gap — original lines 2217-2220 (the start of the `ret` weight
// accumulation, covering the fixed non-witness tx fields) are missing from this excerpt.
2221 1 + // script length (0)
2225 )*4 + // * 4 for non-witness parts
2226 2 + // witness marker and flag
2227 1 + // witness element count
2228 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2229 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2230 2*(1 + 71); // two signatures + sighash type flags
// Each present output adds (8-byte amount + 1-byte script length + script) bytes, all
// non-witness data and hence weighted * 4.
2231 if let Some(spk) = a_scriptpubkey {
2232 ret += ((8+1) + // output values and script length
2233 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2235 if let Some(spk) = b_scriptpubkey {
2236 ret += ((8+1) + // output values and script length
2237 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2243 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2244 assert!(self.context.pending_inbound_htlcs.is_empty());
2245 assert!(self.context.pending_outbound_htlcs.is_empty());
2246 assert!(self.context.pending_update_fee.is_none());
2248 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2249 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2250 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2252 if value_to_holder < 0 {
2253 assert!(self.context.is_outbound());
2254 total_fee_satoshis += (-value_to_holder) as u64;
2255 } else if value_to_counterparty < 0 {
2256 assert!(!self.context.is_outbound());
2257 total_fee_satoshis += (-value_to_counterparty) as u64;
2260 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2261 value_to_counterparty = 0;
2264 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2265 value_to_holder = 0;
2268 assert!(self.context.shutdown_scriptpubkey.is_some());
2269 let holder_shutdown_script = self.get_closing_scriptpubkey();
2270 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2271 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2273 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2274 (closing_transaction, total_fee_satoshis)
2277 fn funding_outpoint(&self) -> OutPoint {
2278 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2281 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2284 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2285 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2287 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2289 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2290 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2291 where L::Target: Logger {
2292 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2293 // (see equivalent if condition there).
2294 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2295 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2296 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2297 self.context.latest_monitor_update_id = mon_update_id;
2298 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2299 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
/// Marks the inbound HTLC `htlc_id_arg` as fulfilled with `payment_preimage_arg`, returning a
/// monitor update carrying the preimage and, unless the claim must wait in the holding cell,
/// an `update_fulfill_htlc` message to send. Duplicate claims return `DuplicateClaim`.
2303 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2304 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2305 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2306 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2308 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2309 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2311 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2313 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2314 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2315 // these, but for now we just have to treat them as normal.
// Locate the pending inbound HTLC being claimed, recording its index and amount.
2317 let mut pending_idx = core::usize::MAX;
2318 let mut htlc_value_msat = 0;
2319 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2320 if htlc.htlc_id == htlc_id_arg {
2321 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
2322 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2323 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2325 InboundHTLCState::Committed => {},
2326 InboundHTLCState::LocalRemoved(ref reason) => {
2327 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2329 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2330 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2332 return UpdateFulfillFetch::DuplicateClaim {};
2335 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2336 // Don't return in release mode here so that we can update channel_monitor
2340 htlc_value_msat = htlc.amount_msat;
2344 if pending_idx == core::usize::MAX {
2345 #[cfg(any(test, fuzzing))]
2346 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2347 // this is simply a duplicate claim, not previously failed and we lost funds.
2348 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2349 return UpdateFulfillFetch::DuplicateClaim {};
2352 // Now update local state:
2354 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2355 // can claim it even if the channel hits the chain before we see their next commitment.
2356 self.context.latest_monitor_update_id += 1;
2357 let monitor_update = ChannelMonitorUpdate {
2358 update_id: self.context.latest_monitor_update_id,
2359 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2360 payment_preimage: payment_preimage_arg.clone(),
// If we cannot send updates right now, the claim goes to the holding cell instead of
// producing an update_fulfill_htlc message; the monitor update is still returned.
2364 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2365 // Note that this condition is the same as the assertion in
2366 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2367 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2368 // do not not get into this branch.
2369 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2370 match pending_update {
2371 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2372 if htlc_id_arg == htlc_id {
2373 // Make sure we don't leave latest_monitor_update_id incremented here:
2374 self.context.latest_monitor_update_id -= 1;
2375 #[cfg(any(test, fuzzing))]
2376 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2377 return UpdateFulfillFetch::DuplicateClaim {};
2380 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2381 if htlc_id_arg == htlc_id {
2382 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2383 // TODO: We may actually be able to switch to a fulfill here, though its
2384 // rare enough it may not be worth the complexity burden.
2385 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2386 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2392 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2393 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2394 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2396 #[cfg(any(test, fuzzing))]
2397 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2398 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2400 #[cfg(any(test, fuzzing))]
2401 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
// Normal path: transition the HTLC to LocalRemoved(Fulfill) and emit the message.
2404 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2405 if let InboundHTLCState::Committed = htlc.state {
2407 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2408 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2410 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2411 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2414 UpdateFulfillFetch::NewClaim {
2417 msg: Some(msgs::UpdateFulfillHTLC {
2418 channel_id: self.context.channel_id(),
2419 htlc_id: htlc_id_arg,
2420 payment_preimage: payment_preimage_arg,
/// Fulfills HTLC `htlc_id` and, when an `update_fulfill_htlc` message can be sent now, also
/// builds the accompanying commitment update, merging both into a single
/// `ChannelMonitorUpdate` so the preimage reaches the monitor even if commitment-bearing
/// updates are currently blocked.
2425 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2426 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2427 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2428 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2429 // Even if we aren't supposed to let new monitor updates with commitment state
2430 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2431 // matter what. Sadly, to push a new monitor update which flies before others
2432 // already queued, we have to insert it into the pending queue and update the
2433 // update_ids of all the following monitors.
2434 if release_cs_monitor && msg.is_some() {
2435 let mut additional_update = self.build_commitment_no_status_check(logger);
2436 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2437 // to be strictly increasing by one, so decrement it here.
2438 self.context.latest_monitor_update_id = monitor_update.update_id;
2439 monitor_update.updates.append(&mut additional_update.updates);
// Otherwise the preimage update must jump the blocked-update queue: it takes the id of
// the first blocked update and every blocked update's id is shifted up by one.
2441 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2442 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2443 monitor_update.update_id = new_mon_id;
2444 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2445 held_update.update.update_id += 1;
// NOTE(review): extraction gap — original lines 2446-2455 (the blocked-path branch that
// queues the commitment update as a PendingChannelMonitorUpdate) are missing here.
2448 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2449 let update = self.build_commitment_no_status_check(logger);
2450 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2456 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2457 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2459 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2463 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2464 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2465 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2466 /// before we fail backwards.
2468 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2469 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2470 /// [`ChannelError::Ignore`].
2471 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2472 -> Result<(), ChannelError> where L::Target: Logger {
2473 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2474 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2477 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2478 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2479 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2480 /// before we fail backwards.
2482 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2483 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2484 /// [`ChannelError::Ignore`].
/// `force_holding_cell` forces the failure into the holding cell even when we could send an
/// `update_fail_htlc` message immediately.
2485 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2486 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2487 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2488 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2490 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2492 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2493 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2494 // these, but for now we just have to treat them as normal.
// Locate the pending inbound HTLC being failed.
2496 let mut pending_idx = core::usize::MAX;
2497 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2498 if htlc.htlc_id == htlc_id_arg {
2500 InboundHTLCState::Committed => {},
2501 InboundHTLCState::LocalRemoved(ref reason) => {
2502 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2504 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2509 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2510 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2516 if pending_idx == core::usize::MAX {
2517 #[cfg(any(test, fuzzing))]
2518 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2519 // is simply a duplicate fail, not previously failed and we failed-back too early.
2520 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
// If we cannot currently send updates, the failure must go to the holding cell.
2524 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2525 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2526 force_holding_cell = true;
2529 // Now update local state:
2530 if force_holding_cell {
// Check the holding cell for an existing resolution of this HTLC before queueing ours.
2531 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2532 match pending_update {
2533 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2534 if htlc_id_arg == htlc_id {
2535 #[cfg(any(test, fuzzing))]
2536 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2540 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2541 if htlc_id_arg == htlc_id {
2542 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2543 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2549 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2550 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2551 htlc_id: htlc_id_arg,
// Immediate path: mark the HTLC LocalRemoved(FailRelay) and emit an update_fail_htlc.
2557 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2559 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2560 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2563 Ok(Some(msgs::UpdateFailHTLC {
2564 channel_id: self.context.channel_id(),
2565 htlc_id: htlc_id_arg,
2570 // Message handlers:
2572 /// Handles a funding_signed message from the remote end.
2573 /// If this call is successful, broadcast the funding transaction (and not before!)
/// Verifies the counterparty's signature over our initial commitment transaction, builds the
/// initial `ChannelMonitor`, and advances the channel to `FundingSent` (plus
/// `WaitingForBatch` for batch-funded channels).
2574 pub fn funding_signed<L: Deref>(
2575 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2576 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
2580 if !self.context.is_outbound() {
2581 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2583 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2584 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2586 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2587 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2588 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2589 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2592 let funding_script = self.context.get_funding_redeemscript();
// Build both initial commitment transactions: theirs for the monitor, ours to verify the
// signature in `msg` against.
2594 let counterparty_keys = self.context.build_remote_transaction_keys();
2595 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2596 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2597 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2599 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2600 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2602 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2603 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2605 let trusted_tx = initial_commitment_tx.trust();
2606 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2607 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2608 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2609 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2610 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2614 let holder_commitment_tx = HolderCommitmentTransaction::new(
2615 initial_commitment_tx,
2618 &self.context.get_holder_pubkeys().funding_pubkey,
2619 self.context.counterparty_funding_pubkey()
2622 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2623 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
// Assemble the initial ChannelMonitor, seeded with both initial commitment transactions.
2626 let funding_redeemscript = self.context.get_funding_redeemscript();
2627 let funding_txo = self.context.get_funding_txo().unwrap();
2628 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2629 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2630 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2631 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2632 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2633 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2634 shutdown_script, self.context.get_holder_selected_contest_delay(),
2635 &self.context.destination_script, (funding_txo, funding_txo_script),
2636 &self.context.channel_transaction_parameters,
2637 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2639 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2641 channel_monitor.provide_initial_counterparty_commitment_tx(
2642 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2643 self.context.cur_counterparty_commitment_transaction_number,
2644 self.context.counterparty_cur_commitment_point.unwrap(),
2645 counterparty_initial_commitment_tx.feerate_per_kw(),
2646 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2647 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
2649 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
2650 if self.context.is_batch_funding() {
2651 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2653 self.context.channel_state = ChannelState::FundingSent as u32;
2655 self.context.cur_holder_commitment_transaction_number -= 1;
2656 self.context.cur_counterparty_commitment_transaction_number -= 1;
2658 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2660 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2661 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2665 /// Updates the state of the channel to indicate that all channels in the batch have received
2666 /// funding_signed and persisted their monitors.
2667 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2668 /// treated as a non-batch channel going forward.
2669 pub fn set_batch_ready(&mut self) {
2670 self.context.is_batch_funding = None;
2671 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2674 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2675 /// and the channel is now usable (and public), this may generate an announcement_signatures to
/// be sent back to the peer. Returns `Err(ChannelError::Ignore(_))` (workaround path) or
/// `Err(ChannelError::Close(_))` on invalid or unexpected messages.
2677 pub fn channel_ready<NS: Deref, L: Deref>(
2678 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2679 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2680 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2682 NS::Target: NodeSigner,
// lnd can send channel_ready before channel_reestablish (see linked issue); stash the
// message and replay it after reestablish rather than closing the channel.
2685 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2686 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2687 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2690 if let Some(scid_alias) = msg.short_channel_id_alias {
2691 if Some(scid_alias) != self.context.short_channel_id {
2692 // The scid alias provided can be used to route payments *from* our counterparty,
2693 // i.e. can be used for inbound payments and provided in invoices, but is not used
2694 // when routing outbound payments.
2695 self.context.latest_inbound_scid_alias = Some(scid_alias);
2699 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2701 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2702 // batch, but we can receive channel_ready messages.
// NOTE(review): extraction gap — original line 2703 (the `debug_assert!(` opening this
// batch-state assertion) is missing from this excerpt; 2704-2705 are its condition.
2704 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2705 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
// State machine: their channel_ready either records TheirChannelReady, completes the
// FundingSent -> ChannelReady transition, or is a permissible retransmission.
2707 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2708 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2709 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2710 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2711 self.context.update_time_counter += 1;
2712 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2713 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2714 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2715 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2717 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2718 // required, or they're sending a fresh SCID alias.
2719 let expected_point =
2720 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2721 // If they haven't ever sent an updated point, the point they send should match
2723 self.context.counterparty_cur_commitment_point
2724 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2725 // If we've advanced the commitment number once, the second commitment point is
2726 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2727 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2728 self.context.counterparty_prev_commitment_point
2730 // If they have sent updated points, channel_ready is always supposed to match
2731 // their "first" point, which we re-derive here.
2732 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2733 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2734 ).expect("We already advanced, so previous secret keys should have been validated already")))
2736 if expected_point != Some(msg.next_per_commitment_point) {
2737 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2741 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2744 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2745 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2747 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2749 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
/// Handles an inbound `update_add_htlc` message from our counterparty.
///
/// Validates the HTLC against protocol rules and our locally-configured policy (HTLC count,
/// in-flight value, dust exposure, channel reserve and on-chain fee affordability) before
/// recording it in `pending_inbound_htlcs` in the `RemoteAnnounced` state.
///
/// Two classes of rejection are distinguished:
/// * Protocol violations (overdrawing the channel, 0-msat HTLC, skipped HTLC id, shutdown
///   already exchanged, etc.) return `ChannelError::Close` — the channel must be closed.
/// * Policy violations (dust-exposure or fee-spike-buffer limits) only downgrade
///   `pending_forward_status` to a failure via `create_pending_htlc_status`, so the HTLC is
///   accepted and later failed back rather than closing the channel.
///
/// `create_pending_htlc_status` maps the current status plus a BOLT 4 failure code (the
/// `u16` argument, e.g. 0x4000|8 or 0x1000|7) to the status we will eventually report.
2752 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2753 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2754 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2755 ) -> Result<(), ChannelError>
2756 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2757 FE::Target: FeeEstimator, L::Target: Logger,
2759 // We can't accept HTLCs sent after we've sent a shutdown.
2760 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2761 if local_sent_shutdown {
2762 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2764 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2765 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2766 if remote_sent_shutdown {
2767 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2769 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2770 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2772 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2773 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2775 if msg.amount_msat == 0 {
2776 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2778 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2779 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce our advertised concurrency limits: max accepted HTLC count and max total
// in-flight msat across all pending inbound HTLCs.
2782 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2783 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2784 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2785 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2787 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2788 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2791 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2792 // the reserve_satoshis we told them to always have as direct payment so that they lose
2793 // something if we punish them for broadcasting an old state).
2794 // Note that we don't really care about having a small/no to_remote output in our local
2795 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2796 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2797 // present in the next commitment transaction we send them (at least for fulfilled ones,
2798 // failed ones won't modify value_to_self).
2799 // Note that we will send HTLCs which another instance of rust-lightning would think
2800 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2801 // Channel state once they will not be present in the next received commitment
2803 let mut removed_outbound_total_msat = 0;
2804 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2805 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2806 removed_outbound_total_msat += htlc.amount_msat;
2807 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2808 removed_outbound_total_msat += htlc.amount_msat;
// Dust-exposure checks: an HTLC whose on-chain claim would cost more than it is worth
// (below the relevant dust limit plus claim-tx fee) adds unrecoverable value at risk.
// Exceeding the configured exposure cap fails the HTLC (code 0x1000|7) instead of
// closing the channel. With zero-fee-anchor channels the fee component is elided
// (note the elided else-branch around line 2813 presumably yields (0, 0) — not
// visible in this view).
2812 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2813 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2816 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2817 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2818 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2820 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2821 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2822 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2823 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2824 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2825 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2826 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2830 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2831 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2832 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2833 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2834 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2835 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2836 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Project each side's balance as if all pending inbound HTLCs succeed and the
// soon-to-be-removed fulfilled outbound HTLCs (counted above) are gone.
2840 let pending_value_to_self_msat =
2841 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2842 let pending_remote_value_msat =
2843 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2844 if pending_remote_value_msat < msg.amount_msat {
2845 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2848 // Check that the remote can afford to pay for this HTLC on-chain at the current
2849 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2851 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2852 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2853 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
// On non-anchor channels this is 0; with anchors the funder also carries the two
// fixed-value anchor outputs, which we must subtract from their spendable balance.
2855 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2856 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2860 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2861 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2863 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2864 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
2868 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2869 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2873 if !self.context.is_outbound() {
2874 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
2875 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
2876 // side, only on the sender's. Note that with anchor outputs we are no longer as
2877 // sensitive to fee spikes, so we need to account for them.
2878 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2879 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2880 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2881 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2883 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
2884 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2885 // the HTLC, i.e. its status is already set to failing.
2886 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
2887 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2890 // Check that they won't violate our local required channel reserve by adding this HTLC.
2891 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2892 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2893 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
2894 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC ids must be issued sequentially with no gaps.
2897 if self.context.next_counterparty_htlc_id != msg.htlc_id {
2898 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
// Locktime convention: values >= 500,000,000 denote unix timestamps, not block heights.
2900 if msg.cltv_expiry >= 500000000 {
2901 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
2904 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
2905 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
2906 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
2910 // Now update local state:
2911 self.context.next_counterparty_htlc_id += 1;
2912 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
2913 htlc_id: msg.htlc_id,
2914 amount_msat: msg.amount_msat,
2915 payment_hash: msg.payment_hash,
2916 cltv_expiry: msg.cltv_expiry,
2917 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
2922 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
/// for, transitioning it from `Committed` to `RemoteRemoved`.
///
/// Exactly one of `check_preimage` (for a fulfill, whose SHA256 must hash to the HTLC's
/// payment hash) or `fail_reason` (for a fail/malformed) may be provided — the assert below
/// enforces that they are not both set. Returns a reference to the updated HTLC on success.
///
/// Errors with `ChannelError::Close` if the preimage is wrong, the HTLC is unknown, was never
/// committed (`LocalAnnounced`), or was already resolved by the peer.
2924 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
2925 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
2926 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
2927 if htlc.htlc_id == htlc_id {
// Determine the outcome: a failure (converted from `fail_reason`) or a verified success.
2928 let outcome = match check_preimage {
2929 None => fail_reason.into(),
2930 Some(payment_preimage) => {
2931 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
2932 if payment_hash != htlc.payment_hash {
2933 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
2935 OutboundHTLCOutcome::Success(Some(payment_preimage))
// Only a `Committed` HTLC may be removed by the peer; any other state is a
// protocol violation (match on htlc.state — opening line elided in this view).
2939 OutboundHTLCState::LocalAnnounced(_) =>
2940 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
2941 OutboundHTLCState::Committed => {
2942 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
2944 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
2945 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
2950 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
/// Handles an inbound `update_fulfill_htlc` message, marking the referenced outbound HTLC
/// as successfully removed by the peer (via `mark_outbound_htlc_removed`, which also
/// verifies the preimage against the HTLC's payment hash).
///
/// Returns the HTLC's source and amount (in msat) so the caller can claim upstream.
/// Errors with `ChannelError::Close` if the channel is not operational, the peer is
/// mid-reconnect, or the fulfill itself is invalid.
2953 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
2954 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2955 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
2957 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2958 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
2961 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
/// Handles an inbound `update_fail_htlc` message, marking the referenced outbound HTLC as
/// failed by the peer with the given `fail_reason`.
///
/// Errors with `ChannelError::Close` if the channel is not operational, the peer is
/// mid-reconnect, or the HTLC cannot legally be failed (see `mark_outbound_htlc_removed`).
2964 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2965 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2966 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
2968 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2969 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
2972 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `update_fail_malformed_htlc` message; identical in effect to
/// `update_fail_htlc` except the caller has already converted the malformed-failure
/// information into an `HTLCFailReason`.
///
/// Errors with `ChannelError::Close` under the same conditions as `update_fail_htlc`.
2976 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
2977 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2978 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
2980 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2981 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
2984 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `commitment_signed` message: verifies the counterparty's signature on
/// our next holder commitment transaction and on every non-dust HTLC transaction, then
/// advances local HTLC/fee state and queues a `ChannelMonitorUpdate` carrying the newly
/// signed commitment.
///
/// All fallible validation (signature checks, fee-affordability, signer validation) happens
/// before any state is mutated, so an `Err(ChannelError::Close)` leaves the channel state
/// untouched. On success returns the (possibly blocked) monitor update; the caller is
/// responsible for the resulting revoke_and_ack / commitment_signed responses once the
/// monitor update completes.
2988 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
2989 where L::Target: Logger
2991 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2992 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
2994 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2995 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
2997 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
2998 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
// Rebuild the commitment transaction the peer claims to have signed and verify their
// signature against the 2-of-2 funding redeemscript.
3001 let funding_script = self.context.get_funding_redeemscript();
3003 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3005 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3006 let commitment_txid = {
3007 let trusted_tx = commitment_stats.tx.trust();
3008 let bitcoin_tx = trusted_tx.built_transaction();
3009 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3011 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3012 log_bytes!(msg.signature.serialize_compact()[..]),
3013 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3014 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3015 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3016 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3020 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3022 // If our counterparty updated the channel fee in this commitment transaction, check that
3023 // they can actually afford the new fee now.
3024 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3025 update_state == FeeUpdateState::RemoteAnnounced
// (Elided `if update_fee` guard in this view.) Only the funder may change the fee, so if a
// remote fee update is pending we must be the non-funder.
3028 debug_assert!(!self.context.is_outbound());
3029 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3030 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3031 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
// Test/fuzzing-only: cross-check the fee we projected when sending against the fee of the
// commitment we actually built, if the cached projection is still applicable.
3034 #[cfg(any(test, fuzzing))]
3036 if self.context.is_outbound() {
3037 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3038 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3039 if let Some(info) = projected_commit_tx_info {
3040 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3041 + self.context.holding_cell_htlc_updates.len();
3042 if info.total_pending_htlcs == total_pending_htlcs
3043 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3044 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3045 && info.feerate == self.context.feerate_per_kw {
3046 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3052 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3053 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3056 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3057 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3058 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3059 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3060 // backwards compatibility, we never use it in production. To provide test coverage, here,
3061 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3062 #[allow(unused_assignments, unused_mut)]
3063 let mut separate_nondust_htlc_sources = false;
3064 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3065 use core::hash::{BuildHasher, Hasher};
3066 // Get a random value using the only std API to do so - the DefaultHasher
3067 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3068 separate_nondust_htlc_sources = rand_val % 2 == 0;
// Verify the peer's signature on each non-dust HTLC transaction (those with an assigned
// output index in the commitment tx); dust HTLCs have no on-chain tx and get no signature.
3071 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3072 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3073 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3074 if let Some(_) = htlc.transaction_output_index {
3075 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3076 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3077 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3079 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3080 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3081 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3082 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3083 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
3084 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3085 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
3086 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3088 if !separate_nondust_htlc_sources {
3089 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3092 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3094 if separate_nondust_htlc_sources {
3095 if let Some(source) = source_opt.take() {
3096 nondust_htlc_sources.push(source);
3099 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
// Assemble the fully-signed holder commitment and let the signer backend validate it
// before we commit to the new state.
3102 let holder_commitment_tx = HolderCommitmentTransaction::new(
3103 commitment_stats.tx,
3105 msg.htlc_signatures.clone(),
3106 &self.context.get_holder_pubkeys().funding_pubkey,
3107 self.context.counterparty_funding_pubkey()
3110 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3111 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3113 // Update state now that we've passed all the can-fail calls...
3114 let mut need_commitment = false;
3115 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3116 if *update_state == FeeUpdateState::RemoteAnnounced {
3117 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3118 need_commitment = true;
// Advance inbound HTLCs the peer announced into the next stage of the commitment dance.
3122 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3123 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3124 Some(forward_info.clone())
3126 if let Some(forward_info) = new_forward {
3127 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3128 &htlc.payment_hash, &self.context.channel_id);
3129 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3130 need_commitment = true;
3133 let mut claimed_htlcs = Vec::new();
3134 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3135 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3136 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3137 &htlc.payment_hash, &self.context.channel_id);
3138 // Grab the preimage, if it exists, instead of cloning
3139 let mut reason = OutboundHTLCOutcome::Success(None);
3140 mem::swap(outcome, &mut reason);
3141 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3142 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3143 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3144 // have a `Success(None)` reason. In this case we could forget some HTLC
3145 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3146 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3148 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3150 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3151 need_commitment = true;
// Record the new holder commitment in a monitor update so the ChannelMonitor can enforce it.
3155 self.context.latest_monitor_update_id += 1;
3156 let mut monitor_update = ChannelMonitorUpdate {
3157 update_id: self.context.latest_monitor_update_id,
3158 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3159 commitment_tx: holder_commitment_tx,
3160 htlc_outputs: htlcs_and_sigs,
3162 nondust_htlc_sources,
3166 self.context.cur_holder_commitment_transaction_number -= 1;
3167 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3168 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3169 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3171 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3172 // In case we initially failed monitor updating without requiring a response, we need
3173 // to make sure the RAA gets sent first.
3174 self.context.monitor_pending_revoke_and_ack = true;
3175 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3176 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3177 // the corresponding HTLC status updates so that
3178 // get_last_commitment_update_for_send includes the right HTLCs.
3179 self.context.monitor_pending_commitment_signed = true;
3180 let mut additional_update = self.build_commitment_no_status_check(logger);
3181 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3182 // strictly increasing by one, so decrement it here.
3183 self.context.latest_monitor_update_id = monitor_update.update_id;
3184 monitor_update.updates.append(&mut additional_update.updates);
3186 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3187 &self.context.channel_id);
3188 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3191 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3192 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3193 // we'll send one right away when we get the revoke_and_ack when we
3194 // free_holding_cell_htlcs().
3195 let mut additional_update = self.build_commitment_no_status_check(logger);
3196 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3197 // strictly increasing by one, so decrement it here.
3198 self.context.latest_monitor_update_id = monitor_update.update_id;
3199 monitor_update.updates.append(&mut additional_update.updates);
3203 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3204 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3205 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3206 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3209 /// Public version of the below, checking relevant preconditions first.
3210 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3211 /// returns `(None, Vec::new())`.
///
/// Returns an optional `ChannelMonitorUpdate` covering the freed updates plus any HTLCs
/// that could not be sent and must be failed backwards.
3212 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3213 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3214 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3215 where F::Target: FeeEstimator, L::Target: Logger
// Only free when the channel has reached ChannelReady and we are not blocked: no
// outstanding revoke_and_ack expected, peer connected, and no monitor update in flight.
3217 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3218 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3219 self.free_holding_cell_htlcs(fee_estimator, logger)
3220 } else { (None, Vec::new()) }
3223 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3224 /// for our counterparty.
3225 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3226 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3227 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3228 where F::Target: FeeEstimator, L::Target: Logger
3230 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3231 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3232 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3233 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3235 let mut monitor_update = ChannelMonitorUpdate {
3236 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3237 updates: Vec::new(),
3240 let mut htlc_updates = Vec::new();
3241 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3242 let mut update_add_count = 0;
3243 let mut update_fulfill_count = 0;
3244 let mut update_fail_count = 0;
3245 let mut htlcs_to_fail = Vec::new();
3246 for htlc_update in htlc_updates.drain(..) {
3247 // Note that this *can* fail, though it should be due to rather-rare conditions on
3248 // fee races with adding too many outputs which push our total payments just over
3249 // the limit. In case it's less rare than I anticipate, we may want to revisit
3250 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3251 // to rebalance channels.
3252 match &htlc_update {
3253 &HTLCUpdateAwaitingACK::AddHTLC {
3254 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3255 skimmed_fee_msat, ..
3257 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3258 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3260 Ok(_) => update_add_count += 1,
3263 ChannelError::Ignore(ref msg) => {
3264 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3265 // If we fail to send here, then this HTLC should
3266 // be failed backwards. Failing to send here
3267 // indicates that this HTLC may keep being put back
3268 // into the holding cell without ever being
3269 // successfully forwarded/failed/fulfilled, causing
3270 // our counterparty to eventually close on us.
3271 htlcs_to_fail.push((source.clone(), *payment_hash));
3274 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3280 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3281 // If an HTLC claim was previously added to the holding cell (via
3282 // `get_update_fulfill_htlc`, then generating the claim message itself must
3283 // not fail - any in between attempts to claim the HTLC will have resulted
3284 // in it hitting the holding cell again and we cannot change the state of a
3285 // holding cell HTLC from fulfill to anything else.
3286 let mut additional_monitor_update =
3287 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3288 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3289 { monitor_update } else { unreachable!() };
3290 update_fulfill_count += 1;
3291 monitor_update.updates.append(&mut additional_monitor_update.updates);
3293 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3294 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3295 Ok(update_fail_msg_option) => {
3296 // If an HTLC failure was previously added to the holding cell (via
3297 // `queue_fail_htlc`) then generating the fail message itself must
3298 // not fail - we should never end up in a state where we double-fail
3299 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3300 // for a full revocation before failing.
3301 debug_assert!(update_fail_msg_option.is_some());
3302 update_fail_count += 1;
3305 if let ChannelError::Ignore(_) = e {}
3307 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3314 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3315 return (None, htlcs_to_fail);
3317 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3318 self.send_update_fee(feerate, false, fee_estimator, logger)
3323 let mut additional_update = self.build_commitment_no_status_check(logger);
3324 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3325 // but we want them to be strictly increasing by one, so reset it here.
3326 self.context.latest_monitor_update_id = monitor_update.update_id;
3327 monitor_update.updates.append(&mut additional_update.updates);
3329 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3330 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3331 update_add_count, update_fulfill_count, update_fail_count);
3333 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3334 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3340 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3341 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3342 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3343 /// generating an appropriate error *after* the channel state has been updated based on the
3344 /// revoke_and_ack message.
///
/// Returns the set of HTLCs which should be failed backwards, plus a [`ChannelMonitorUpdate`]
/// to apply (`None` when the update was blocked/held — see `hold_mon_update` and
/// `self.context.blocked_monitor_updates` below).
3345 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3346 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3347 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3348 where F::Target: FeeEstimator, L::Target: Logger,
// -- Sanity/state checks: channel must be operational, the peer connected, and we must not
// already be exchanging closing_signed messages.
3350 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3351 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3353 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3354 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3356 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3357 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
// Check that the revealed per-commitment secret actually corresponds to the commitment point
// the counterparty previously gave us (when we have one cached to compare against).
3360 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3362 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3363 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3364 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3368 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3369 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3370 // haven't given them a new commitment transaction to broadcast). We should probably
3371 // take advantage of this by updating our channel monitor, sending them an error, and
3372 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3373 // lot of work, and there's some chance this is all a misunderstanding anyway.
3374 // We have to do *something*, though, since our signer may get mad at us for otherwise
3375 // jumping a remote commitment number, so best to just force-close and move on.
3376 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
// Test/fuzz-only fee caches are invalidated since the commitment state is about to advance.
3379 #[cfg(any(test, fuzzing))]
3381 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3382 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
// Let the signer validate the revocation before we commit to it.
3385 match &self.context.holder_signer {
3386 ChannelSignerType::Ecdsa(ecdsa) => {
3387 ecdsa.validate_counterparty_revocation(
3388 self.context.cur_counterparty_commitment_transaction_number + 1,
3390 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
// Store the secret in the commitment-secrets store (verifies it chains with prior secrets)
// and start building the monitor update that records it.
3394 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3395 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3396 self.context.latest_monitor_update_id += 1;
3397 let mut monitor_update = ChannelMonitorUpdate {
3398 update_id: self.context.latest_monitor_update_id,
3399 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3400 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3401 secret: msg.per_commitment_secret,
3405 // Update state now that we've passed all the can-fail calls...
3406 // (note that we may still fail to generate the new commitment_signed message, but that's
3407 // OK, we step the channel here and *then* if the new generation fails we can fail the
3408 // channel based on that, but stepping stuff here should be safe either way.
3409 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3410 self.context.sent_message_awaiting_response = None;
3411 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3412 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3413 self.context.cur_counterparty_commitment_transaction_number -= 1;
3415 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3416 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3419 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3420 let mut to_forward_infos = Vec::new();
3421 let mut revoked_htlcs = Vec::new();
3422 let mut finalized_claimed_htlcs = Vec::new();
3423 let mut update_fail_htlcs = Vec::new();
3424 let mut update_fail_malformed_htlcs = Vec::new();
3425 let mut require_commitment = false;
3426 let mut value_to_self_msat_diff: i64 = 0;
3429 // Take references explicitly so that we can hold multiple references to self.context.
3430 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3431 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3433 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
// Pass 1: drop HTLCs which are now fully resolved by this revocation, tracking the msat
// balance delta (inbound fulfills credit us; outbound fulfills debit us below).
3434 pending_inbound_htlcs.retain(|htlc| {
3435 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3436 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3437 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3438 value_to_self_msat_diff += htlc.amount_msat as i64;
3443 pending_outbound_htlcs.retain(|htlc| {
3444 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3445 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3446 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3447 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3449 finalized_claimed_htlcs.push(htlc.source.clone());
3450 // They fulfilled, so we sent them money
3451 value_to_self_msat_diff -= htlc.amount_msat as i64;
// Pass 2: advance the state of the surviving HTLCs now that the remote has revoked.
3456 for htlc in pending_inbound_htlcs.iter_mut() {
3457 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3459 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3463 let mut state = InboundHTLCState::Committed;
3464 mem::swap(&mut state, &mut htlc.state);
3466 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3467 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3468 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3469 require_commitment = true;
3470 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3471 match forward_info {
3472 PendingHTLCStatus::Fail(fail_msg) => {
3473 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3474 require_commitment = true;
3476 HTLCFailureMsg::Relay(msg) => {
3477 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3478 update_fail_htlcs.push(msg)
3480 HTLCFailureMsg::Malformed(msg) => {
3481 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3482 update_fail_malformed_htlcs.push(msg)
3486 PendingHTLCStatus::Forward(forward_info) => {
3487 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3488 to_forward_infos.push((forward_info, htlc.htlc_id));
3489 htlc.state = InboundHTLCState::Committed;
3495 for htlc in pending_outbound_htlcs.iter_mut() {
3496 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3497 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3498 htlc.state = OutboundHTLCState::Committed;
3500 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3501 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3502 // Grab the preimage, if it exists, instead of cloning
3503 let mut reason = OutboundHTLCOutcome::Success(None);
3504 mem::swap(outcome, &mut reason);
3505 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3506 require_commitment = true;
// Apply the accumulated balance delta from removed/fulfilled HTLCs.
3510 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
// A pending fee update may also become committed (or require a new commitment) on this RAA.
3512 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3513 match update_state {
3514 FeeUpdateState::Outbound => {
3515 debug_assert!(self.context.is_outbound());
3516 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3517 self.context.feerate_per_kw = feerate;
3518 self.context.pending_update_fee = None;
3520 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3521 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3522 debug_assert!(!self.context.is_outbound());
3523 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3524 require_commitment = true;
3525 self.context.feerate_per_kw = feerate;
3526 self.context.pending_update_fee = None;
// Decide whether the monitor update can be released to the caller now, or must be queued
// behind earlier blocked updates / an explicit caller hold.
3531 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3532 let release_state_str =
3533 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3534 macro_rules! return_with_htlcs_to_fail {
3535 ($htlcs_to_fail: expr) => {
3536 if !release_monitor {
3537 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3538 update: monitor_update,
3540 return Ok(($htlcs_to_fail, None));
3542 return Ok(($htlcs_to_fail, Some(monitor_update)));
3547 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3548 // We can't actually generate a new commitment transaction (incl by freeing holding
3549 // cells) while we can't update the monitor, so we just return what we have.
3550 if require_commitment {
3551 self.context.monitor_pending_commitment_signed = true;
3552 // When the monitor updating is restored we'll call
3553 // get_last_commitment_update_for_send(), which does not update state, but we're
3554 // definitely now awaiting a remote revoke before we can step forward any more, so
3556 let mut additional_update = self.build_commitment_no_status_check(logger);
3557 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3558 // strictly increasing by one, so decrement it here.
3559 self.context.latest_monitor_update_id = monitor_update.update_id;
3560 monitor_update.updates.append(&mut additional_update.updates);
3562 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3563 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3564 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3565 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3566 return_with_htlcs_to_fail!(Vec::new());
// Monitor updating is not paused: try to free any holding-cell HTLCs, folding their
// monitor updates into ours, and otherwise build a new commitment if one is required.
3569 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3570 (Some(mut additional_update), htlcs_to_fail) => {
3571 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3572 // strictly increasing by one, so decrement it here.
3573 self.context.latest_monitor_update_id = monitor_update.update_id;
3574 monitor_update.updates.append(&mut additional_update.updates);
3576 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3577 &self.context.channel_id(), release_state_str);
3579 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3580 return_with_htlcs_to_fail!(htlcs_to_fail);
3582 (None, htlcs_to_fail) => {
3583 if require_commitment {
3584 let mut additional_update = self.build_commitment_no_status_check(logger);
3586 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3587 // strictly increasing by one, so decrement it here.
3588 self.context.latest_monitor_update_id = monitor_update.update_id;
3589 monitor_update.updates.append(&mut additional_update.updates);
3591 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3592 &self.context.channel_id(),
3593 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3596 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3597 return_with_htlcs_to_fail!(htlcs_to_fail);
3599 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3600 &self.context.channel_id(), release_state_str);
3602 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3603 return_with_htlcs_to_fail!(htlcs_to_fail);
3609 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3610 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3611 /// commitment update.
3612 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3613 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3614 where F::Target: FeeEstimator, L::Target: Logger
// `force_holding_cell` is passed as `true`, so send_update_fee must queue the update in the
// holding cell rather than returning an UpdateFee message to send now; assert that holds.
3616 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3617 assert!(msg_opt.is_none(), "We forced holding cell?");
3620 /// Adds a pending update to this channel. See the doc for send_htlc for
3621 /// further details on the optionness of the return value.
3622 /// If our balance is too low to cover the cost of the next commitment transaction at the
3623 /// new feerate, the update is cancelled.
3625 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3626 /// [`Channel`] if `force_holding_cell` is false.
///
/// Returns `Some(UpdateFee)` to send to the peer, or `None` when the update was either
/// cancelled (unaffordable / dust-exposure limits) or queued in the holding cell.
3627 fn send_update_fee<F: Deref, L: Deref>(
3628 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3629 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3630 ) -> Option<msgs::UpdateFee>
3631 where F::Target: FeeEstimator, L::Target: Logger
// Only the channel funder may send update_fee, and only on a usable, live channel —
// violations are caller bugs, hence panics rather than errors.
3633 if !self.context.is_outbound() {
3634 panic!("Cannot send fee from inbound channel");
3636 if !self.context.is_usable() {
3637 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3639 if !self.context.is_live() {
3640 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3643 // Before proposing a feerate update, check that we can actually afford the new fee.
3644 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3645 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3646 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3647 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// The buffer fee reserves room for holding-cell HTLCs plus CONCURRENT_INBOUND_HTLC_FEE_BUFFER
// additional inbound HTLCs landing at the new feerate, converted to msat (* 1000).
3648 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3649 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3650 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3651 //TODO: auto-close after a number of failures?
3652 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3656 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3657 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3658 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3659 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3660 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3661 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3664 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3665 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// While awaiting the peer's revoke_and_ack or a monitor-update completion we cannot send a
// new update, so defer to the holding cell regardless of what the caller asked for.
3669 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3670 force_holding_cell = true;
3673 if force_holding_cell {
3674 self.context.holding_cell_update_fee = Some(feerate_per_kw);
// Record the fee update as pending-outbound and hand the caller the message to send.
3678 debug_assert!(self.context.pending_update_fee.is_none());
3679 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3681 Some(msgs::UpdateFee {
3682 channel_id: self.context.channel_id,
3687 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3688 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3690 /// No further message handling calls may be made until a channel_reestablish dance has
3692 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3693 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3694 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3695 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3699 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3700 // While the below code should be idempotent, it's simpler to just return early, as
3701 // redundant disconnect events can fire, though they should be rare.
// Roll back announcement-signature progress that the peer may not have received.
3705 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3706 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3709 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3710 // will be retransmitted.
3711 self.context.last_sent_closing_fee = None;
3712 self.context.pending_counterparty_closing_signed = None;
3713 self.context.closing_fee_limits = None;
// Drop inbound HTLCs the peer announced but never committed; they will re-send them after
// reestablish, so we also rewind next_counterparty_htlc_id by the number dropped (below).
3715 let mut inbound_drop_count = 0;
3716 self.context.pending_inbound_htlcs.retain(|htlc| {
3718 InboundHTLCState::RemoteAnnounced(_) => {
3719 // They sent us an update_add_htlc but we never got the commitment_signed.
3720 // We'll tell them what commitment_signed we're expecting next and they'll drop
3721 // this HTLC accordingly
3722 inbound_drop_count += 1;
3725 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3726 // We received a commitment_signed updating this HTLC and (at least hopefully)
3727 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3728 // in response to it yet, so don't touch it.
3731 InboundHTLCState::Committed => true,
3732 InboundHTLCState::LocalRemoved(_) => {
3733 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3734 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3735 // (that we missed). Keep this around for now and if they tell us they missed
3736 // the commitment_signed we can re-transmit the update then.
3741 self.context.next_counterparty_htlc_id -= inbound_drop_count;
// A fee update the peer announced but never committed is likewise forgotten.
3743 if let Some((_, update_state)) = self.context.pending_update_fee {
3744 if update_state == FeeUpdateState::RemoteAnnounced {
3745 debug_assert!(!self.context.is_outbound());
3746 self.context.pending_update_fee = None;
3750 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3751 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3752 // They sent us an update to remove this but haven't yet sent the corresponding
3753 // commitment_signed, we need to move it back to Committed and they can re-send
3754 // the update upon reconnection.
3755 htlc.state = OutboundHTLCState::Committed;
// Finally mark the channel as paused until a channel_reestablish completes.
3759 self.context.sent_message_awaiting_response = None;
3761 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3762 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3766 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3767 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3768 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3769 /// update completes (potentially immediately).
3770 /// The messages which were generated with the monitor update must *not* have been sent to the
3771 /// remote end, and must instead have been dropped. They will be regenerated when
3772 /// [`Self::monitor_updating_restored`] is called.
3774 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3775 /// [`chain::Watch`]: crate::chain::Watch
3776 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3777 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3778 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3779 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3780 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// Flags are OR-ed (not assigned) so multiple pauses accumulate what must be resent, and the
// pending HTLC lists are appended so nothing queued earlier is lost.
3782 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3783 self.context.monitor_pending_commitment_signed |= resend_commitment;
3784 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3785 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3786 self.context.monitor_pending_failures.append(&mut pending_fails);
3787 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3788 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3791 /// Indicates that the latest ChannelMonitor update has been committed by the client
3792 /// successfully and we should restore normal operation. Returns messages which should be sent
3793 /// to the remote side.
3794 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3795 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3796 user_config: &UserConfig, best_block_height: u32
3797 ) -> MonitorRestoreUpdates
3800 NS::Target: NodeSigner
// Must only be called while a monitor update is actually in progress; clear the flag.
3802 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3803 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3805 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3806 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3807 // first received the funding_signed.
3808 let mut funding_broadcastable =
3809 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3810 self.context.funding_transaction.take()
3812 // That said, if the funding transaction is already confirmed (ie we're active with a
3813 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3814 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3815 funding_broadcastable = None;
3818 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3819 // (and we assume the user never directly broadcasts the funding transaction and waits for
3820 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3821 // * an inbound channel that failed to persist the monitor on funding_created and we got
3822 // the funding transaction confirmed before the monitor was persisted, or
3823 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3824 let channel_ready = if self.context.monitor_pending_channel_ready {
3825 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3826 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3827 self.context.monitor_pending_channel_ready = false;
3828 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3829 Some(msgs::ChannelReady {
3830 channel_id: self.context.channel_id(),
3831 next_per_commitment_point,
3832 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3836 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
// Drain the HTLC queues that accumulated while the monitor update was pending; ownership
// moves to the returned MonitorRestoreUpdates via mem::swap.
3838 let mut accepted_htlcs = Vec::new();
3839 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3840 let mut failed_htlcs = Vec::new();
3841 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3842 let mut finalized_claimed_htlcs = Vec::new();
3843 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
// If the peer is disconnected we cannot send raa/commitment messages now; the
// channel_reestablish flow will regenerate them, so drop the pending flags.
3845 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3846 self.context.monitor_pending_revoke_and_ack = false;
3847 self.context.monitor_pending_commitment_signed = false;
3848 return MonitorRestoreUpdates {
3849 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3850 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
// Regenerate the messages that were dropped when the monitor update was initiated.
3854 let raa = if self.context.monitor_pending_revoke_and_ack {
3855 Some(self.get_last_revoke_and_ack())
3857 let commitment_update = if self.context.monitor_pending_commitment_signed {
3858 self.get_last_commitment_update_for_send(logger).ok()
3860 if commitment_update.is_some() {
3861 self.mark_awaiting_response();
3864 self.context.monitor_pending_revoke_and_ack = false;
3865 self.context.monitor_pending_commitment_signed = false;
3866 let order = self.context.resend_order.clone();
3867 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
3868 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
3869 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
3870 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
3871 MonitorRestoreUpdates {
3872 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
/// Handles an inbound update_fee message from the channel funder, validating the proposed
/// feerate and recording it as a remote-announced pending fee update.
3876 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
3877 where F::Target: FeeEstimator, L::Target: Logger
// Only the non-funding side may receive update_fee, and only while connected.
3879 if self.context.is_outbound() {
3880 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
3882 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3883 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
3885 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
// Capture whether the new feerate exceeds our dust buffer *before* recording the pending
// update (get_dust_buffer_feerate considers pending_update_fee).
3886 let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
3888 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
3889 self.context.update_time_counter += 1;
3890 // If the feerate has increased over the previous dust buffer (note that
3891 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
3892 // won't be pushed over our dust exposure limit by the feerate increase.
3893 if feerate_over_dust_buffer {
3894 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3895 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3896 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3897 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3898 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3899 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3900 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
3901 msg.feerate_per_kw, holder_tx_dust_exposure)));
3903 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3904 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
3905 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
/// Builds the `revoke_and_ack` we would (re)send for our current commitment state: releases
/// the per-commitment secret for the commitment we are revoking and advertises the next
/// per-commitment point.
///
/// Note that commitment numbers count *down* from `INITIAL_COMMITMENT_NUMBER`, so the
/// `+ 2` below addresses the already-revoked commitment two steps "behind" the current one.
3911 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
3912 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3913 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
3914 msgs::RevokeAndACK {
3915 channel_id: self.context.channel_id,
3916 per_commitment_secret,
3917 next_per_commitment_point,
// Taproot-channel field; always `None` on ECDSA channels.
3919 next_local_nonce: None,
3923 /// Gets the last commitment update for immediate sending to our peer.
///
/// Reconstructs every un-revoked update message (`update_add_htlc`, `update_fulfill_htlc`,
/// `update_fail_htlc`, `update_fail_malformed_htlc`, and — if we are the funder — an
/// `update_fee`) from our pending HTLC state, then regenerates the matching
/// `commitment_signed`. Also maintains the `signer_pending_commitment_update` flag: it is
/// cleared when the signer produced a signature and set when the signer is still pending,
/// in which case `Err(())` is returned.
3924 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
3925 let mut update_add_htlcs = Vec::new();
3926 let mut update_fulfill_htlcs = Vec::new();
3927 let mut update_fail_htlcs = Vec::new();
3928 let mut update_fail_malformed_htlcs = Vec::new();
// Outbound HTLCs still in LocalAnnounced have not been irrevocably committed by the peer,
// so they must be re-sent as update_add_htlc messages.
3930 for htlc in self.context.pending_outbound_htlcs.iter() {
3931 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
3932 update_add_htlcs.push(msgs::UpdateAddHTLC {
3933 channel_id: self.context.channel_id(),
3934 htlc_id: htlc.htlc_id,
3935 amount_msat: htlc.amount_msat,
3936 payment_hash: htlc.payment_hash,
3937 cltv_expiry: htlc.cltv_expiry,
3938 onion_routing_packet: (**onion_packet).clone(),
3939 skimmed_fee_msat: htlc.skimmed_fee_msat,
// Inbound HTLCs in LocalRemoved similarly need their removal (fail / fail-malformed /
// fulfill) re-sent, keyed on the recorded removal reason.
3944 for htlc in self.context.pending_inbound_htlcs.iter() {
3945 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3947 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
3948 update_fail_htlcs.push(msgs::UpdateFailHTLC {
3949 channel_id: self.context.channel_id(),
3950 htlc_id: htlc.htlc_id,
3951 reason: err_packet.clone()
3954 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
3955 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
3956 channel_id: self.context.channel_id(),
3957 htlc_id: htlc.htlc_id,
3958 sha256_of_onion: sha256_of_onion.clone(),
3959 failure_code: failure_code.clone(),
3962 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
3963 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
3964 channel_id: self.context.channel_id(),
3965 htlc_id: htlc.htlc_id,
3966 payment_preimage: payment_preimage.clone(),
// Only the funder sends update_fee; re-send any pending fee update alongside the HTLCs.
3973 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
3974 Some(msgs::UpdateFee {
3975 channel_id: self.context.channel_id(),
3976 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
3980 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
3981 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
3982 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
// Regenerate commitment_signed without mutating channel state; track whether the signer
// produced a signature so callers can retry once the async signer completes.
3983 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
3984 if self.context.signer_pending_commitment_update {
3985 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
3986 self.context.signer_pending_commitment_update = false;
3990 if !self.context.signer_pending_commitment_update {
3991 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
3992 self.context.signer_pending_commitment_update = true;
3996 Ok(msgs::CommitmentUpdate {
3997 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4002 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
///
/// Returns `Some` only if we have already sent `shutdown` (LocalShutdownSent is set), in
/// which case the message is rebuilt from our stored closing script so the peer sees the
/// same scriptpubkey after reconnection.
4003 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4004 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
// Having sent shutdown implies we committed to a closing script earlier.
4005 assert!(self.context.shutdown_scriptpubkey.is_some());
4006 Some(msgs::Shutdown {
4007 channel_id: self.context.channel_id,
4008 scriptpubkey: self.get_closing_scriptpubkey(),
/// Handles an inbound `channel_reestablish`, determining which messages (channel_ready,
/// revoke_and_ack, commitment_signed, shutdown, announcement signatures) must be re-sent to
/// bring both sides back in sync after a reconnection.
///
4013 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4014 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4016 /// Some links printed in log lines are included here to check them during build (when run with
4017 /// `cargo doc --document-private-items`):
4018 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4019 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4020 pub fn channel_reestablish<L: Deref, NS: Deref>(
4021 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4022 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4023 ) -> Result<ReestablishResponses, ChannelError>
4026 NS::Target: NodeSigner
// A reestablish is only valid immediately after a reconnect; anything else indicates the
// peer's view has diverged from ours, so close rather than attempt recovery.
4028 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4029 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4030 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4031 // just close here instead of trying to recover.
4032 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
// Reject out-of-range commitment numbers (they count down from INITIAL_COMMITMENT_NUMBER,
// and next_local_commitment_number of 0 is never valid).
4035 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4036 msg.next_local_commitment_number == 0 {
4037 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
// Verify the peer's claimed last per-commitment secret actually derives the point we gave
// them at that height — proof they really saw our revocations.
4040 if msg.next_remote_commitment_number > 0 {
4041 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4042 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4043 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4044 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4045 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
// If the peer proves a commitment number *ahead* of ours, we have lost state (stale
// backup). This is unrecoverable locally — log loudly and panic with remediation steps.
4047 if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4048 macro_rules! log_and_panic {
4049 ($err_msg: expr) => {
4050 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4051 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4054 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4055 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4056 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4057 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4058 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4059 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4060 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4061 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4065 // Before we change the state of the channel, we check if the peer is sending a very old
4066 // commitment transaction number, if yes we send a warning message.
4067 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4068 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4070 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4074 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4075 // remaining cases either succeed or ErrorMessage-fail).
4076 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4077 self.context.sent_message_awaiting_response = None;
4079 let shutdown_msg = self.get_outbound_shutdown();
4081 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
// Pre-channel_ready handling: if funding was sent but we have not (or cannot yet have)
// sent channel_ready, there is nothing beyond channel_ready itself to resend.
4083 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4084 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4085 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4086 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4087 if msg.next_remote_commitment_number != 0 {
4088 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4090 // Short circuit the whole handler as there is nothing we can resend them
4091 return Ok(ReestablishResponses {
4092 channel_ready: None,
4093 raa: None, commitment_update: None,
4094 order: RAACommitmentOrder::CommitmentFirst,
4095 shutdown_msg, announcement_sigs,
4099 // We have OurChannelReady set!
4100 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4101 return Ok(ReestablishResponses {
4102 channel_ready: Some(msgs::ChannelReady {
4103 channel_id: self.context.channel_id(),
4104 next_per_commitment_point,
4105 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4107 raa: None, commitment_update: None,
4108 order: RAACommitmentOrder::CommitmentFirst,
4109 shutdown_msg, announcement_sigs,
// Decide whether the peer is missing our last revoke_and_ack: compare the commitment
// number they expect next against our current holder commitment number.
4113 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4114 // Remote isn't waiting on any RevokeAndACK from us!
4115 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4117 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
// With a monitor update in flight we must not hand out the RAA yet; queue it instead.
4118 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4119 self.context.monitor_pending_revoke_and_ack = true;
4122 Some(self.get_last_revoke_and_ack())
4125 return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4128 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4129 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4130 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4131 // the corresponding revoke_and_ack back yet.
4132 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4133 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4134 self.mark_awaiting_response();
4136 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
// Re-send channel_ready when the peer expects commitment #1 and we are still at the first
// holder commitment (i.e. nothing beyond the opening handshake has happened).
4138 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4139 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4140 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4141 Some(msgs::ChannelReady {
4142 channel_id: self.context.channel_id(),
4143 next_per_commitment_point,
4144 short_channel_id_alias: Some(self.context.outbound_scid_alias),
// Peer is fully up to date on our commitments: resend at most the RAA.
4148 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4149 if required_revoke.is_some() {
4150 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4152 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4155 Ok(ReestablishResponses {
4156 channel_ready, shutdown_msg, announcement_sigs,
4157 raa: required_revoke,
4158 commitment_update: None,
4159 order: self.context.resend_order.clone(),
// Peer missed our last commitment_signed: resend it (or queue it behind a monitor update),
// preserving the recorded RAA/commitment resend ordering.
4161 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4162 if required_revoke.is_some() {
4163 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4165 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4168 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4169 self.context.monitor_pending_commitment_signed = true;
4170 Ok(ReestablishResponses {
4171 channel_ready, shutdown_msg, announcement_sigs,
4172 commitment_update: None, raa: None,
4173 order: self.context.resend_order.clone(),
4176 Ok(ReestablishResponses {
4177 channel_ready, shutdown_msg, announcement_sigs,
4178 raa: required_revoke,
4179 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4180 order: self.context.resend_order.clone(),
4184 Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4188 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4189 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4190 /// at which point they will be recalculated.
///
/// The result is cached in `closing_fee_limits` so repeated calls during one negotiation
/// return the same range. Only the funder (outbound side) pays closing fees, which is why
/// the non-funder's maximum is effectively unbounded (capped at the counterparty's balance).
4191 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4193 where F::Target: FeeEstimator
// Return the cached range if we already computed it this session.
4195 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4197 // Propose a range from our current Background feerate to our Normal feerate plus our
4198 // force_close_avoidance_max_fee_satoshis.
4199 // If we fail to come to consensus, we'll have to force-close.
4200 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4201 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4202 // that we don't expect to need fee bumping
4203 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4204 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4206 // The spec requires that (when the channel does not have anchors) we only send absolute
4207 // channel fees no greater than the absolute channel fee on the current commitment
4208 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4209 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4210 // some force-closure by old nodes, but we wanted to close the channel anyway.
// Respect a user-requested target feerate, raising both ends of the range to at least it
// (the non-funder caps the target at the current channel feerate).
4212 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4213 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4214 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4215 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4218 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4219 // below our dust limit, causing the output to disappear. We don't bother handling this
4220 // case, however, as this should only happen if a channel is closed before any (material)
4221 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4222 // come to consensus with our counterparty on appropriate fees, however it should be a
4223 // relatively rare case. We can revisit this later, though note that in order to determine
4224 // if the funders' output is dust we have to know the absolute fee we're going to use.
4225 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4226 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4227 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4228 // We always add force_close_avoidance_max_fee_satoshis to our normal
4229 // feerate-calculated fee, but allow the max to be overridden if we're using a
4230 // target feerate-calculated fee.
4231 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4232 proposed_max_feerate as u64 * tx_weight / 1000)
// Non-funder: the fee comes out of the counterparty's balance, so cap the max at
// everything they have (channel value minus our balance, rounded up to whole sats).
4234 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4237 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4238 self.context.closing_fee_limits.clone().unwrap()
4241 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4242 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4243 /// this point if we're the funder we should send the initial closing_signed, and in any case
4244 /// shutdown should complete within a reasonable timeframe.
4245 fn closing_negotiation_ready(&self) -> bool {
// Thin delegation: the actual state checks live on the shared channel context.
4246 self.context.closing_negotiation_ready()
4249 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4250 /// an Err if no progress is being made and the channel should be force-closed instead.
4251 /// Should be called on a one-minute timer.
4252 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4253 if self.closing_negotiation_ready() {
// `closing_signed_in_flight` acts as a one-tick grace flag: still set on the next
// timer tick means negotiation stalled for a full interval — force-close.
4254 if self.context.closing_signed_in_flight {
4255 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4257 self.context.closing_signed_in_flight = true;
/// Kicks off (or resumes) the `closing_signed` negotiation if we are in a position to do so.
///
/// Returns `(None, None, None)` when negotiation cannot start yet (we already sent a
/// closing fee, or shutdown/HTLC-drain is incomplete). As the non-funder we never propose
/// first; instead we respond to any counterparty `closing_signed` we had deferred. As the
/// funder we sign a closing transaction at our minimum fee and send the initial
/// `closing_signed` along with our acceptable fee range.
4263 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4264 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4265 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4266 where F::Target: FeeEstimator, L::Target: Logger
4268 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4269 return Ok((None, None, None));
// The non-funder only *responds*: process a counterparty closing_signed we stashed while
// a monitor update was in flight, if any.
4272 if !self.context.is_outbound() {
4273 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4274 return self.closing_signed(fee_estimator, &msg);
4276 return Ok((None, None, None));
4279 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4281 assert!(self.context.shutdown_scriptpubkey.is_some());
// Open negotiation at our minimum fee; the counterparty can push us up within our range.
4282 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4283 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4284 our_min_fee, our_max_fee, total_fee_satoshis);
4286 match &self.context.holder_signer {
4287 ChannelSignerType::Ecdsa(ecdsa) => {
4289 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4290 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
// Record what we proposed so closing_signed() can detect fee agreement later.
4292 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4293 Ok((Some(msgs::ClosingSigned {
4294 channel_id: self.context.channel_id,
4295 fee_satoshis: total_fee_satoshis,
4297 fee_range: Some(msgs::ClosingSignedFeeRange {
4298 min_fee_satoshis: our_min_fee,
4299 max_fee_satoshis: our_max_fee,
4306 // Marks a channel as waiting for a response from the counterparty. If it's not received
4307 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
// to disconnect the peer (see `should_disconnect_peer_awaiting_response`).
4309 fn mark_awaiting_response(&mut self) {
// `Some(0)` starts the tick counter; each timer tick increments it.
4310 self.context.sent_message_awaiting_response = Some(0);
4313 /// Determines whether we should disconnect the counterparty due to not receiving a response
4314 /// within our expected timeframe.
4316 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
///
/// Returns `true` once the tick counter set by `mark_awaiting_response` reaches
/// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`]; counts only while a response is outstanding.
4317 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4318 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4321 // Don't disconnect when we're not waiting on a response.
4324 *ticks_elapsed += 1;
4325 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4329 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4330 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
// Handles an inbound `shutdown`: validates the peer's closing script and channel state,
// records RemoteShutdownSent, optionally generates our own `shutdown` reply plus a monitor
// update pinning our closing script, and drops any holding-cell HTLCs so their payments
// can be failed back.
4332 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4333 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4335 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4336 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4337 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4338 // can do that via error message without getting a connection fail anyway...
4339 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
// Shutdown with HTLCs the remote announced but never committed is a protocol violation.
4341 for htlc in self.context.pending_inbound_htlcs.iter() {
4342 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4343 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4346 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// Warn (not close) on a non-BOLT2-compliant script, and on a script that differs from one
// the peer previously committed to.
4348 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4349 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
4352 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4353 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4354 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
4357 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4360 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4361 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4362 // any further commitment updates after we set LocalShutdownSent.
4363 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
// If we have no closing script yet, fetch one from the signer provider now; failure here
// is still safe to error on since nothing has been mutated yet.
4365 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4368 assert!(send_shutdown);
4369 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4370 Ok(scriptpubkey) => scriptpubkey,
4371 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4373 if !shutdown_scriptpubkey.is_compatible(their_features) {
4374 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4376 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4381 // From here on out, we may not fail!
4383 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4384 self.context.update_time_counter += 1;
// Persist our (possibly newly-chosen) closing script to the monitor so it survives a
// restart mid-close.
4386 let monitor_update = if update_shutdown_script {
4387 self.context.latest_monitor_update_id += 1;
4388 let monitor_update = ChannelMonitorUpdate {
4389 update_id: self.context.latest_monitor_update_id,
4390 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4391 scriptpubkey: self.get_closing_scriptpubkey(),
4394 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4395 self.push_ret_blockable_mon_update(monitor_update)
4397 let shutdown = if send_shutdown {
4398 Some(msgs::Shutdown {
4399 channel_id: self.context.channel_id,
4400 scriptpubkey: self.get_closing_scriptpubkey(),
4404 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4405 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4406 // cell HTLCs and return them to fail the payment.
4407 self.context.holding_cell_update_fee = None;
4408 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4409 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4411 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4412 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4419 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4420 self.context.update_time_counter += 1;
4422 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
/// Assembles the fully-signed cooperative closing transaction from both parties' signatures,
/// filling in the 2-of-2 funding input witness: dummy element, the two signatures ordered by
/// lexicographic comparison of the serialized funding pubkeys, then the redeemscript.
4425 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4426 let mut tx = closing_tx.trust().built_transaction().clone();
// CHECKMULTISIG consumes one extra stack element; push an empty item for it.
4428 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4430 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4431 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
// Both signatures are DER-encoded with SIGHASH_ALL appended, as required for witness sigs.
4432 let mut holder_sig = sig.serialize_der().to_vec();
4433 holder_sig.push(EcdsaSighashType::All as u8);
4434 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4435 cp_sig.push(EcdsaSighashType::All as u8);
// Signature order must match the pubkey order in the funding redeemscript (keys sorted
// lexicographically), so compare serialized keys to pick the ordering.
4436 if funding_key[..] < counterparty_funding_key[..] {
4437 tx.input[0].witness.push(holder_sig);
4438 tx.input[0].witness.push(cp_sig);
4440 tx.input[0].witness.push(cp_sig);
4441 tx.input[0].witness.push(holder_sig);
4444 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4448 pub fn closing_signed<F: Deref>(
4449 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4450 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4451 where F::Target: FeeEstimator
4453 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4454 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4456 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4457 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4459 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4460 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4462 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4463 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4466 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4467 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4470 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4471 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4472 return Ok((None, None, None));
4475 let funding_redeemscript = self.context.get_funding_redeemscript();
4476 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4477 if used_total_fee != msg.fee_satoshis {
4478 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4480 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4482 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4485 // The remote end may have decided to revoke their output due to inconsistent dust
4486 // limits, so check for that case by re-checking the signature here.
4487 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4488 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4489 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4493 for outp in closing_tx.trust().built_transaction().output.iter() {
4494 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4495 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4499 assert!(self.context.shutdown_scriptpubkey.is_some());
4500 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4501 if last_fee == msg.fee_satoshis {
4502 let shutdown_result = ShutdownResult {
4503 monitor_update: None,
4504 dropped_outbound_htlcs: Vec::new(),
4505 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4507 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4508 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4509 self.context.update_time_counter += 1;
4510 return Ok((None, Some(tx), Some(shutdown_result)));
4514 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4516 macro_rules! propose_fee {
4517 ($new_fee: expr) => {
4518 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4519 (closing_tx, $new_fee)
4521 self.build_closing_transaction($new_fee, false)
4524 return match &self.context.holder_signer {
4525 ChannelSignerType::Ecdsa(ecdsa) => {
4527 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4528 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4529 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4530 let shutdown_result = ShutdownResult {
4531 monitor_update: None,
4532 dropped_outbound_htlcs: Vec::new(),
4533 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4535 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4536 self.context.update_time_counter += 1;
4537 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4538 (Some(tx), Some(shutdown_result))
4543 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4544 Ok((Some(msgs::ClosingSigned {
4545 channel_id: self.context.channel_id,
4546 fee_satoshis: used_fee,
4548 fee_range: Some(msgs::ClosingSignedFeeRange {
4549 min_fee_satoshis: our_min_fee,
4550 max_fee_satoshis: our_max_fee,
4552 }), signed_tx, shutdown_result))
4558 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4559 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4560 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4562 if max_fee_satoshis < our_min_fee {
4563 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4565 if min_fee_satoshis > our_max_fee {
4566 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4569 if !self.context.is_outbound() {
4570 // They have to pay, so pick the highest fee in the overlapping range.
4571 // We should never set an upper bound aside from their full balance
4572 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4573 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4575 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4576 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4577 msg.fee_satoshis, our_min_fee, our_max_fee)));
4579 // The proposed fee is in our acceptable range, accept it and broadcast!
4580 propose_fee!(msg.fee_satoshis);
4583 // Old fee style negotiation. We don't bother to enforce whether they are complying
4584 // with the "making progress" requirements, we just comply and hope for the best.
4585 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4586 if msg.fee_satoshis > last_fee {
4587 if msg.fee_satoshis < our_max_fee {
4588 propose_fee!(msg.fee_satoshis);
4589 } else if last_fee < our_max_fee {
4590 propose_fee!(our_max_fee);
4592 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4595 if msg.fee_satoshis > our_min_fee {
4596 propose_fee!(msg.fee_satoshis);
4597 } else if last_fee > our_min_fee {
4598 propose_fee!(our_min_fee);
4600 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4604 if msg.fee_satoshis < our_min_fee {
4605 propose_fee!(our_min_fee);
4606 } else if msg.fee_satoshis > our_max_fee {
4607 propose_fee!(our_max_fee);
4609 propose_fee!(msg.fee_satoshis);
// Checks that an HTLC being forwarded over this channel pays at least the configured
// forwarding fee and respects the configured cltv_expiry_delta. On violation returns
// Err((onion error string, BOLT-4 failure code)).
// NOTE(review): several lines are elided in this extraction (the Err construction
// around each message/code pair and the trailing Ok(()) are not visible here).
4615 fn internal_htlc_satisfies_config(
4616 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4617 ) -> Result<(), (&'static str, u16)> {
// Total fee = proportional part (ppm of the outbound amount) + base fee, computed with
// checked arithmetic so that overflow yields None and is treated as an insufficient fee.
4618 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4619 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
// Fail if the fee overflowed, the inbound amount cannot cover the fee, or the amount
// remaining after the fee is less than the requested outbound amount.
4620 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4621 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4623 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4624 0x1000 | 12, // fee_insufficient
// The inbound HTLC must expire at least cltv_expiry_delta blocks after the outbound one,
// giving us time to claim on-chain if the downstream HTLC is resolved at the last moment.
4627 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4629 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4630 0x1000 | 13, // incorrect_cltv_expiry
4636 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4637 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4638 /// unsuccessful, falls back to the previous one if one exists.
4639 pub fn htlc_satisfies_config(
4640 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4641 ) -> Result<(), (&'static str, u16)> {
// Try against the currently-applied config first.
4642 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
// NOTE(review): the combinator chaining these two checks (line 4643) is elided in this
// extraction — presumably an `.or_else(...)` retrying with the previous config, so HTLCs
// built against a just-replaced config are still accepted; confirm against full source.
4644 if let Some(prev_config) = self.context.prev_config() {
4645 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
// Returns one above the internally-tracked holder commitment transaction number
// (the internal counter presumably tracks the next number to be used — TODO confirm).
4652 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4653 self.context.cur_holder_commitment_transaction_number + 1
// Returns one above the tracked counterparty commitment number, minus one if we are
// awaiting the counterparty's revoke_and_ack (i.e. an extra commitment is outstanding).
4656 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4657 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
// Two above the tracked counterparty commitment number — presumably the most recently
// revoked counterparty commitment transaction's number; TODO confirm against callers.
4660 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4661 self.context.cur_counterparty_commitment_transaction_number + 2
// Borrowing accessor for the holder's channel signer.
4665 pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
4666 &self.context.holder_signer
// Builds a snapshot of the channel's value-related state (balances, pending HTLC sums,
// limits), converting satoshi-denominated fields to msat where needed.
// NOTE(review): this extraction elides several lines of the struct literal (e.g. the
// accumulator for the holding-cell sum and the closing braces).
4670 pub fn get_value_stat(&self) -> ChannelValueStat {
4672 value_to_self_msat: self.context.value_to_self_msat,
4673 channel_value_msat: self.context.channel_value_satoshis * 1000,
// unwrap(): counterparty_selected_channel_reserve_satoshis is expected to be set by the
// time this is called — TODO confirm the caller contract.
4674 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4675 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4676 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum of AddHTLC amounts still sitting in the holding cell.
4677 holding_cell_outbound_amount_msat: {
4679 for h in self.context.holding_cell_htlc_updates.iter() {
4681 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4689 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4690 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4694 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4695 /// Allowed in any state (including after shutdown)
4696 pub fn is_awaiting_monitor_update(&self) -> bool {
// Simple flag test on the channel_state bitfield.
4697 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4700 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4701 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4702 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
// Update IDs are sequential, so the latest released ID is one before the first blocked one.
4703 self.context.blocked_monitor_updates[0].update.update_id - 1
4706 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4707 /// further blocked monitor update exists after the next.
4708 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4709 if self.context.blocked_monitor_updates.is_empty() { return None; }
// Pop the oldest (front) blocked update; the bool tells the caller whether more remain.
4710 Some((self.context.blocked_monitor_updates.remove(0).update,
4711 !self.context.blocked_monitor_updates.is_empty()))
4714 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4715 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4716 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4717 -> Option<ChannelMonitorUpdate> {
// Only release immediately when nothing is already queued; otherwise preserve ordering
// by queueing behind the existing blocked updates.
4718 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4719 if !release_monitor {
4720 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
// NOTE(review): the struct-literal tail and the release path (returning Some(update))
// are elided in this extraction.
// Number of monitor updates currently held back (blocked) for this channel.
4729 pub fn blocked_monitor_updates_pending(&self) -> usize {
4730 self.context.blocked_monitor_updates.len()
4733 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4734 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4735 /// transaction. If the channel is inbound, this implies simply that the channel has not
4737 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4738 if !self.is_awaiting_monitor_update() { return false; }
// Mask off flags which may legitimately be set alongside FundingSent while the initial
// monitor persist is pending; if only FundingSent remains we are pre-channel_ready.
4739 if self.context.channel_state &
4740 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4741 == ChannelState::FundingSent as u32 {
4742 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4743 // FundingSent set, though our peer could have sent their channel_ready.
4744 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
// 0-conf path: both commitment numbers having ticked exactly once indicates only the
// funding_signed exchange happened, so the pending monitor update must be the initial one.
4747 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4748 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4749 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4750 // waiting for the initial monitor persistence. Thus, we check if our commitment
4751 // transaction numbers have both been iterated only exactly once (for the
4752 // funding_signed), and we're awaiting monitor update.
4754 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4755 // only way to get an awaiting-monitor-update state during initial funding is if the
4756 // initial monitor persistence is still pending).
4758 // Because deciding we're awaiting initial broadcast spuriously could result in
4759 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4760 // we hard-assert here, even in production builds.
4761 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4762 assert!(self.context.monitor_pending_channel_ready);
4763 assert_eq!(self.context.latest_monitor_update_id, 0);
4769 /// Returns true if our channel_ready has been sent
4770 pub fn is_our_channel_ready(&self) -> bool {
// True if the OurChannelReady flag is set, or the non-flag state has advanced past it.
4771 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4774 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4775 pub fn received_shutdown(&self) -> bool {
4776 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4779 /// Returns true if we either initiated or agreed to shut down the channel.
4780 pub fn sent_shutdown(&self) -> bool {
4781 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4784 /// Returns true if this channel is fully shut down. True here implies that no further actions
4785 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4786 /// will be handled appropriately by the chain monitor.
4787 pub fn is_shutdown(&self) -> bool {
4788 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
// ShutdownComplete is expected to be the *only* state bit set once reached; assert it.
4789 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
// Accessor for the current gossip ChannelUpdateStatus.
4794 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4795 self.context.channel_update_status
// Sets the gossip ChannelUpdateStatus, bumping update_time_counter so a fresh
// channel_update timestamp is used.
4798 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4799 self.context.update_time_counter += 1;
4800 self.context.channel_update_status = status;
// Checks whether the funding transaction has sufficient confirmations at `height` to send
// channel_ready, advancing channel_state and returning the message if so.
// NOTE(review): several early-return lines and closing braces are elided in this extraction.
4803 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
4805 // * always when a new block/transactions are confirmed with the new height
4806 // * when funding is signed with a height of 0
// Not confirmed yet and not a 0-conf channel: nothing to do.
4807 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
4811 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
// A reorg took us back before the funding confirmation: forget the confirmation height.
4812 if funding_tx_confirmations <= 0 {
4813 self.context.funding_tx_confirmation_height = 0;
4816 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
4820 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
4821 // channel_ready until the entire batch is ready.
4822 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// State machine: set OurChannelReady, or promote to ChannelReady if the peer's
// channel_ready was already received.
4823 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
4824 self.context.channel_state |= ChannelState::OurChannelReady as u32;
4826 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
4827 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
4828 self.context.update_time_counter += 1;
4830 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
4831 // We got a reorg but not enough to trigger a force close, just ignore.
4834 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
4835 // We should never see a funding transaction on-chain until we've received
4836 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
4837 // an inbound channel - before that we have no known funding TXID). The fuzzer,
4838 // however, may do this and we shouldn't treat it as a bug.
4839 #[cfg(not(fuzzing))]
4840 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
4841 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
4842 self.context.channel_state);
4844 // We got a reorg but not enough to trigger a force close, just ignore.
4848 if need_commitment_update {
// Only emit channel_ready now if no monitor update is in flight and the peer is
// connected; otherwise remember to send it once unblocked/reconnected.
4849 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
4850 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4851 let next_per_commitment_point =
4852 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
4853 return Some(msgs::ChannelReady {
4854 channel_id: self.context.channel_id,
4855 next_per_commitment_point,
4856 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4860 self.context.monitor_pending_channel_ready = true;
4866 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
4867 /// In the first case, we store the confirmation height and calculating the short channel id.
4868 /// In the second, we simply return an Err indicating we need to be force-closed now.
4869 pub fn transactions_confirmed<NS: Deref, L: Deref>(
4870 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
4871 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
4872 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4874 NS::Target: NodeSigner,
// NOTE(review): the where-clause tail, some closing braces, and the final Ok(msgs) are
// elided in this extraction.
4877 let mut msgs = (None, None);
4878 if let Some(funding_txo) = self.context.get_funding_txo() {
4879 for &(index_in_block, tx) in txdata.iter() {
4880 // Check if the transaction is the expected funding transaction, and if it is,
4881 // check that it pays the right amount to the right script.
4882 if self.context.funding_tx_confirmation_height == 0 {
4883 if tx.txid() == funding_txo.txid {
4884 let txo_idx = funding_txo.index as usize;
// Validate the funding output: correct index, correct P2WSH script, correct value.
4885 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
4886 tx.output[txo_idx].value != self.context.channel_value_satoshis {
4887 if self.context.is_outbound() {
4888 // If we generated the funding transaction and it doesn't match what it
4889 // should, the client is really broken and we should just panic and
4890 // tell them off. That said, because hash collisions happen with high
4891 // probability in fuzzing mode, if we're fuzzing we just close the
4892 // channel and move on.
4893 #[cfg(not(fuzzing))]
4894 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
4896 self.context.update_time_counter += 1;
4897 let err_reason = "funding tx had wrong script/value or output index";
4898 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
// For outbound channels, a funding input with an empty witness means the tx is
// malleable — its txid could change, stranding our commitment transactions.
4900 if self.context.is_outbound() {
4901 if !tx.is_coin_base() {
4902 for input in tx.input.iter() {
4903 if input.witness.is_empty() {
4904 // We generated a malleable funding transaction, implying we've
4905 // just exposed ourselves to funds loss to our counterparty.
4906 #[cfg(not(fuzzing))]
4907 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
// Record confirmation details and derive the short channel id from (height, index, vout).
4912 self.context.funding_tx_confirmation_height = height;
4913 self.context.funding_tx_confirmed_in = Some(*block_hash);
4914 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
4915 Ok(scid) => Some(scid),
4916 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
4919 // If this is a coinbase transaction and not a 0-conf channel
4920 // we should update our min_depth to 100 to handle coinbase maturity
4921 if tx.is_coin_base() &&
4922 self.context.minimum_depth.unwrap_or(0) > 0 &&
4923 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
4924 self.context.minimum_depth = Some(COINBASE_MATURITY);
4927 // If we allow 1-conf funding, we may need to check for channel_ready here and
4928 // send it immediately instead of waiting for a best_block_updated call (which
4929 // may have already happened for this block).
4930 if let Some(channel_ready) = self.check_get_channel_ready(height) {
4931 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
4932 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
4933 msgs = (Some(channel_ready), announcement_sigs);
// Any confirmed transaction spending the funding outpoint closes the channel.
4936 for inp in tx.input.iter() {
4937 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
4938 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
4939 return Err(ClosureReason::CommitmentTxConfirmed);
4947 /// When a new block is connected, we check the height of the block against outbound holding
4948 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
4949 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
4950 /// handled by the ChannelMonitor.
4952 /// If we return Err, the channel may have been closed, at which point the standard
4953 /// requirements apply - no calls may be made except those explicitly stated to be allowed
4956 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
4958 pub fn best_block_updated<NS: Deref, L: Deref>(
4959 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
4960 node_signer: &NS, user_config: &UserConfig, logger: &L
4961 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4963 NS::Target: NodeSigner,
// Thin wrapper: delegates to do_best_block_updated with the signer/config present so
// channel_ready/announcement_signatures can be generated.
4966 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
// Core best-block handler: times out stale holding-cell HTLCs, checks for channel_ready,
// and force-closes on funding un-confirmation or funding timeout. `chain_node_signer` is
// None when called from funding_transaction_unconfirmed (no messages can be generated).
// NOTE(review): the where-clause tail, match arms' closing lines, and some braces are
// elided in this extraction.
4969 fn do_best_block_updated<NS: Deref, L: Deref>(
4970 &mut self, height: u32, highest_header_time: u32,
4971 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
4972 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
4974 NS::Target: NodeSigner,
4977 let mut timed_out_htlcs = Vec::new();
4978 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
4979 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
4981 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Drop holding-cell AddHTLCs that are too close to expiry, collecting them for failure.
4982 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4984 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
4985 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
4986 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Keep our channel_update timestamps monotone with the best header time.
4994 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
4996 if let Some(channel_ready) = self.check_get_channel_ready(height) {
4997 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
4998 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5000 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5001 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5004 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// Past (or at) the point where we've sent channel_ready: watch for the funding tx
// becoming unconfirmed, which requires a force-close.
5005 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5006 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5007 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5008 if self.context.funding_tx_confirmation_height == 0 {
5009 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5010 // zero if it has been reorged out, however in either case, our state flags
5011 // indicate we've already sent a channel_ready
5012 funding_tx_confirmations = 0;
5015 // If we've sent channel_ready (or have both sent and received channel_ready), and
5016 // the funding transaction has become unconfirmed,
5017 // close the channel and hope we can get the latest state on chain (because presumably
5018 // the funding transaction is at least still in the mempool of most nodes).
5020 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5021 // 0-conf channel, but not doing so may lead to the
5022 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5024 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5025 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5026 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5027 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound channel whose funding never confirmed within the deadline: give up.
5029 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5030 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5031 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5032 // If funding_tx_confirmed_in is unset, the channel must not be active
5033 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5034 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5035 return Err(ClosureReason::FundingTimedOut);
5038 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5039 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5041 Ok((None, timed_out_htlcs, announcement_sigs))
5044 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5045 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5046 /// before the channel has reached channel_ready and we can just wait for more blocks.
5047 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5048 if self.context.funding_tx_confirmation_height != 0 {
5049 // We handle the funding disconnection by calling best_block_updated with a height one
5050 // below where our funding was connected, implying a reorg back to conf_height - 1.
5051 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5052 // We use the time field to bump the current time we set on channel updates if its
5053 // larger. If we don't know that time has moved forward, we can just set it to the last
5054 // time we saw and it will be ignored.
5055 let best_time = self.context.update_time_counter;
// Passing None for the signer/config means no channel_ready/announcement_sigs can be
// produced on this path, which the asserts below rely on.
5056 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5057 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5058 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5059 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5060 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5066 // We never learned about the funding confirmation anyway, just ignore
5071 // Methods to get unprompted messages to send to the remote end (or where we already returned
5072 // something in the handler for the message that prompted this message):
5074 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5075 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5076 /// directions). Should be used for both broadcasted announcements and in response to an
5077 /// AnnouncementSignatures message from the remote peer.
5079 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5082 /// This will only return ChannelError::Ignore upon failure.
5084 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5085 fn get_channel_announcement<NS: Deref>(
5086 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5087 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5088 if !self.context.config.announced_channel {
5089 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5091 if !self.context.is_usable() {
5092 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5095 let short_channel_id = self.context.get_short_channel_id()
5096 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5097 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5098 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5099 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// BOLT 7 orders node_id_1/node_id_2 (and the matching bitcoin keys) lexicographically.
5100 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5102 let msg = msgs::UnsignedChannelAnnouncement {
5103 features: channelmanager::provided_channel_features(&user_config),
5106 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5107 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5108 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5109 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5110 excess_data: Vec::new(),
// Builds an announcement_signatures message for this channel if it is announceable,
// sufficiently confirmed, the peer is connected, and we haven't already sent one.
// Returns None (after logging) when any precondition fails.
// NOTE(review): several early-`return None` lines and match-arm tails are elided in this
// extraction.
5116 fn get_announcement_sigs<NS: Deref, L: Deref>(
5117 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5118 best_block_height: u32, logger: &L
5119 ) -> Option<msgs::AnnouncementSignatures>
5121 NS::Target: NodeSigner,
// Require funding to be confirmed and buried (the +5 mirrors the six-confirmation
// announcement requirement given confirmation height counts as one).
5124 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5128 if !self.context.is_usable() {
5132 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5133 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Only send once per channel lifetime.
5137 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5141 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5142 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5145 log_trace!(logger, "{:?}", e);
5149 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5151 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
// Sign with the funding key as well; only ECDSA signers are supported here.
5156 match &self.context.holder_signer {
5157 ChannelSignerType::Ecdsa(ecdsa) => {
5158 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5160 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5165 let short_channel_id = match self.context.get_short_channel_id() {
5167 None => return None,
5170 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5172 Some(msgs::AnnouncementSignatures {
5173 channel_id: self.context.channel_id(),
5175 node_signature: our_node_sig,
5176 bitcoin_signature: our_bitcoin_sig,
5182 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5184 fn sign_channel_announcement<NS: Deref>(
5185 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5186 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
// We can only fully sign once the counterparty's announcement_signatures were received.
5187 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5188 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5189 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
// Determine our slot (1 or 2) from the already-ordered unsigned announcement.
5190 let were_node_one = announcement.node_id_1 == our_node_key;
5192 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5193 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5194 match &self.context.holder_signer {
5195 ChannelSignerType::Ecdsa(ecdsa) => {
5196 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5197 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
// Place our/their signatures in the slots matching node_id_1/node_id_2 ordering.
5198 Ok(msgs::ChannelAnnouncement {
5199 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5200 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5201 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5202 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5203 contents: announcement,
5208 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5212 /// Processes an incoming announcement_signatures message, providing a fully-signed
5213 /// channel_announcement message which we can broadcast and storing our counterparty's
5214 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5215 pub fn announcement_signatures<NS: Deref>(
5216 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5217 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5218 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5219 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// Gossip messages are signed over the double-SHA256 of their serialization.
5221 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify both of the peer's signatures (node key and funding key) before storing them.
5223 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5224 return Err(ChannelError::Close(format!(
5225 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5226 &announcement, self.context.get_counterparty_node_id())));
5228 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5229 return Err(ChannelError::Close(format!(
5230 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5231 &announcement, self.context.counterparty_funding_pubkey())));
// Store the (verified) signatures even if we can't announce yet, so a later call can.
5234 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5235 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5236 return Err(ChannelError::Ignore(
5237 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5240 self.sign_channel_announcement(node_signer, announcement)
5243 /// Gets a signed channel_announcement for this channel, if we previously received an
5244 /// announcement_signatures from our counterparty.
5245 pub fn get_signed_channel_announcement<NS: Deref>(
5246 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5247 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Same six-confirmation gate as `announcement_signatures`: height + 5 > tip
// means the funding tx has fewer than six confirmations, so no announcement yet.
5248 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
// Building the unsigned announcement can fail (e.g. signer errors); in that
// case we simply have nothing to broadcast.
5251 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5253 Err(_) => return None,
// Attach our signatures (and the stored counterparty signatures) if available.
5255 match self.sign_channel_announcement(node_signer, announcement) {
5256 Ok(res) => Some(res),
5261 /// May panic if called on a channel that wasn't immediately-previously
5262 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5263 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
// Caller contract: the peer must be marked disconnected and we must have
// exchanged at least one commitment (otherwise the channel would have been
// dropped on disconnect rather than reestablished).
5264 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5265 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5266 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5267 // current to_remote balances. However, it no longer has any use, and thus is now simply
5268 // set to a dummy (but valid, as required by the spec) public key.
5269 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5270 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5271 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5272 let mut pk = [2; 33]; pk[1] = 0xff;
5273 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, include their last
// revealed per_commitment_secret so they can detect data loss on our side.
5274 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5275 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5276 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5279 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
// Sending channel_reestablish starts a response timer for the peer.
5282 self.mark_awaiting_response();
5283 msgs::ChannelReestablish {
5284 channel_id: self.context.channel_id(),
5285 // The protocol has two different commitment number concepts - the "commitment
5286 // transaction number", which starts from 0 and counts up, and the "revocation key
5287 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5288 // commitment transaction numbers by the index which will be used to reveal the
5289 // revocation key for that commitment transaction, which means we have to convert them
5290 // to protocol-level commitment numbers here...
5292 // next_local_commitment_number is the next commitment_signed number we expect to
5293 // receive (indicating if they need to resend one that we missed).
5294 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5295 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5296 // receive, however we track it by the next commitment number for a remote transaction
5297 // (which is one further, as they always revoke previous commitment transaction, not
5298 // the one we send) so we have to decrement by 1. Note that if
5299 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5300 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5302 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5303 your_last_per_commitment_secret: remote_last_secret,
5304 my_current_per_commitment_point: dummy_pubkey,
5305 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5306 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5307 // txid of that interactive transaction, else we MUST NOT set it.
5308 next_funding_txid: None,
5313 // Send stuff to our remote peers:
5315 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5316 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5317 /// commitment update.
5319 /// `Err`s will only be [`ChannelError::Ignore`].
5320 pub fn queue_add_htlc<F: Deref, L: Deref>(
5321 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5322 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5323 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5324 ) -> Result<(), ChannelError>
5325 where F::Target: FeeEstimator, L::Target: Logger
// Delegate to send_htlc with force_holding_cell set to true, so the HTLC is
// queued rather than immediately producing an update_add_htlc message; hence
// any returned message would be a bug.
5328 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5329 skimmed_fee_msat, fee_estimator, logger)
5330 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// Queueing is best-effort: only Ignore errors are expected; anything else
// indicates a bug (see the doc comment above), so debug_assert in that case.
5332 if let ChannelError::Ignore(_) = err { /* fine */ }
5333 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5338 /// Adds a pending outbound HTLC to this channel, note that you probably want
5339 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5341 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5343 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5344 ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5346 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5347 ///   we may not yet have sent the previous commitment update messages and will need to
5348 ///   regenerate them.
5350 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5351 /// on this [`Channel`] if `force_holding_cell` is false.
5353 /// `Err`s will only be [`ChannelError::Ignore`].
5354 fn send_htlc<F: Deref, L: Deref>(
5355 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5356 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5357 skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5358 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5359 where F::Target: FeeEstimator, L::Target: Logger
// HTLCs may only be added on a fully-established channel on which neither side
// has begun shutdown.
5361 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5362 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
// Sanity-bound the amount by the total channel value (in msat).
5364 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5365 if amount_msat > channel_total_msat {
5366 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5369 if amount_msat == 0 {
5370 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Enforce the current per-HTLC minimum and maximum derived from fee/reserve
// constraints (fee-estimator-dependent, hence recomputed here).
5373 let available_balances = self.context.get_available_balances(fee_estimator);
5374 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5375 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5376 available_balances.next_outbound_htlc_minimum_msat)));
5379 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5380 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5381 available_balances.next_outbound_htlc_limit_msat)));
5384 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5385 // Note that this should never really happen, if we're !is_live() on receipt of an
5386 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5387 // the user to send directly into a !is_live() channel. However, if we
5388 // disconnected during the time the previous hop was doing the commitment dance we may
5389 // end up getting here after the forwarding delay. In any case, returning an
5390 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5391 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// While awaiting an RAA or a monitor update we must not produce new update
// messages, so such HTLCs go to the holding cell instead.
5394 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5395 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5396 payment_hash, amount_msat,
5397 if force_holding_cell { "into holding cell" }
5398 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5399 else { "to peer" });
5401 if need_holding_cell {
5402 force_holding_cell = true;
5405 // Now update local state:
5406 if force_holding_cell {
// Queue the HTLC; it will be released by maybe_free_holding_cell_htlcs.
5407 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5412 onion_routing_packet,
// Otherwise record it as a pending outbound HTLC and build the
// update_add_htlc message for the peer immediately.
5418 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5419 htlc_id: self.context.next_holder_htlc_id,
5421 payment_hash: payment_hash.clone(),
5423 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5428 let res = msgs::UpdateAddHTLC {
5429 channel_id: self.context.channel_id,
5430 htlc_id: self.context.next_holder_htlc_id,
5434 onion_routing_packet,
// Consume the HTLC id regardless of which path was taken above.
5437 self.context.next_holder_htlc_id += 1;
// Advances HTLC/fee state for a newly-sent commitment_signed and returns the
// ChannelMonitorUpdate recording the new counterparty commitment transaction.
5442 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5443 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5444 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5445 // fail to generate this, we still are at least at a position where upgrading their status
// Promote inbound HTLCs that were awaiting this commitment.
5447 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5448 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5449 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5451 if let Some(state) = new_state {
5452 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
// Promote outbound HTLC removals that were awaiting this commitment.
5456 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5457 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5458 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5459 // Grab the preimage, if it exists, instead of cloning
5460 let mut reason = OutboundHTLCOutcome::Success(None);
5461 mem::swap(outcome, &mut reason);
5462 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// Commit an inbound fee update (only the non-initiator receives update_fee,
// hence the debug_assert that we are not the channel funder).
5465 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5466 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5467 debug_assert!(!self.context.is_outbound());
5468 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5469 self.context.feerate_per_kw = feerate;
5470 self.context.pending_update_fee = None;
5473 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
// Build the counterparty's new commitment transaction (no state change there).
5475 let (mut htlcs_ref, counterparty_commitment_tx) =
5476 self.build_commitment_no_state_update(logger);
5477 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
// Box the HTLC sources so they can be stored in the monitor update.
5478 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5479 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5481 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5482 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
// Record the new counterparty commitment tx with the channel monitor so it can
// be enforced on-chain if needed.
5485 self.context.latest_monitor_update_id += 1;
5486 let monitor_update = ChannelMonitorUpdate {
5487 update_id: self.context.latest_monitor_update_id,
5488 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5489 commitment_txid: counterparty_commitment_txid,
5490 htlc_outputs: htlcs.clone(),
5491 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5492 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5493 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5494 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5495 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
// We've sent a commitment_signed; no further updates until they revoke_and_ack.
5498 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
// Builds the counterparty's next commitment transaction without mutating any
// channel state, returning the included HTLCs and the transaction itself.
5502 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5503 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5504 where L::Target: Logger
5506 let counterparty_keys = self.context.build_remote_transaction_keys();
5507 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5508 let counterparty_commitment_tx = commitment_stats.tx;
// Test/fuzz-only: cross-check the fee of the freshly-built commitment tx
// against the fee projected earlier when the HTLC was accepted.
5510 #[cfg(any(test, fuzzing))]
5512 if !self.context.is_outbound() {
5513 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5514 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5515 if let Some(info) = projected_commit_tx_info {
// Only compare when the channel state matches what the projection assumed.
5516 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5517 if info.total_pending_htlcs == total_pending_htlcs
5518 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5519 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5520 && info.feerate == self.context.feerate_per_kw {
5521 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5522 assert_eq!(actual_fee, info.fee);
5528 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5531 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5532 /// generation when we shouldn't change HTLC/channel state.
5533 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5534 // Get the fee tests from `build_commitment_no_state_update`
5535 #[cfg(any(test, fuzzing))]
5536 self.build_commitment_no_state_update(logger);
// Rebuild the counterparty's commitment transaction so we can sign it.
5538 let counterparty_keys = self.context.build_remote_transaction_keys();
5539 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5540 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5542 match &self.context.holder_signer {
5543 ChannelSignerType::Ecdsa(ecdsa) => {
5544 let (signature, htlc_signatures);
// Collect the HTLCs included in the commitment so we can pair them with the
// signer's per-HTLC signatures below.
5547 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5548 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Ask the signer for the commitment signature and all HTLC signatures; a
// refusal is surfaced as an Ignore error so the caller can retry later.
5552 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5553 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5555 htlc_signatures = res.1;
5557 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5558 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5559 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5560 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// Trace each signed HTLC transaction for debugging.
5562 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5563 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5564 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5565 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5566 log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
5567 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5571 Ok((msgs::CommitmentSigned {
5572 channel_id: self.context.channel_id,
5576 partial_signature_with_nonce: None,
5577 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5582 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5583 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5585 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5586 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5587 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5588 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5589 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5590 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5591 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5592 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell is false: we want an immediate update if the channel state
// allows it.
5594 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5595 onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
// Per send_htlc's contract, only Ignore errors are expected here.
5596 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// Build the new commitment state and queue the monitor update; the update may
// be blocked, in which case None is returned to the caller.
5599 let monitor_update = self.build_commitment_no_status_check(logger);
5600 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5601 Ok(self.push_ret_blockable_mon_update(monitor_update))
5607 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5609 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
// Snapshot the counterparty's advertised forwarding parameters from the update.
5610 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5611 fee_base_msat: msg.contents.fee_base_msat,
5612 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5613 cltv_expiry_delta: msg.contents.cltv_expiry_delta
// Only report a change when the new info differs from what was stored.
5615 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5617 self.context.counterparty_forwarding_info = new_forwarding_info;
5623 /// Begins the shutdown process, getting a message for the remote peer and returning all
5624 /// holding cell HTLCs for payment failure.
5626 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5627 /// [`ChannelMonitorUpdate`] will be returned).
5628 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5629 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5630 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
// Refuse to start shutdown while any outbound HTLC has been announced to the
// peer but not yet irrevocably committed.
5632 for htlc in self.context.pending_outbound_htlcs.iter() {
5633 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5634 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
// Reject a second shutdown attempt, distinguishing local- vs remote-initiated.
5637 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5638 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5639 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5641 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5642 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
// A previously-committed (e.g. upfront) shutdown script may not be overridden.
5645 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5646 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5648 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// Cooperative shutdown requires a live peer and no in-flight monitor update;
// force-close is the alternative in those states.
5649 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5650 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5653 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5654 // script is set, we just force-close and call it a day.
5655 let mut chan_closed = false;
5656 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
// Resolve which shutdown scriptpubkey to use: an already-set one, the caller's
// override, or a fresh one from the signer.
5660 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5662 None if !chan_closed => {
5663 // use override shutdown script if provided
5664 let shutdown_scriptpubkey = match override_shutdown_script {
5665 Some(script) => script,
5667 // otherwise, use the shutdown scriptpubkey provided by the signer
5668 match signer_provider.get_shutdown_scriptpubkey() {
5669 Ok(scriptpubkey) => scriptpubkey,
5670 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
// The chosen script must be acceptable under the peer's negotiated features.
5674 if !shutdown_scriptpubkey.is_compatible(their_features) {
5675 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5677 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5683 // From here on out, we may not fail!
5684 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
// A not-yet-funded channel jumps straight to ShutdownComplete; otherwise mark
// that we've sent (or are about to send) our shutdown message.
5685 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5686 let shutdown_result = ShutdownResult {
5687 monitor_update: None,
5688 dropped_outbound_htlcs: Vec::new(),
5689 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5691 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5692 Some(shutdown_result)
5694 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5697 self.context.update_time_counter += 1;
// If we just picked a new shutdown script, persist it via the channel monitor.
5699 let monitor_update = if update_shutdown_script {
5700 self.context.latest_monitor_update_id += 1;
5701 let monitor_update = ChannelMonitorUpdate {
5702 update_id: self.context.latest_monitor_update_id,
5703 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5704 scriptpubkey: self.get_closing_scriptpubkey(),
5707 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5708 self.push_ret_blockable_mon_update(monitor_update)
5710 let shutdown = msgs::Shutdown {
5711 channel_id: self.context.channel_id,
5712 scriptpubkey: self.get_closing_scriptpubkey(),
5715 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5716 // our shutdown until we've committed all of the pending changes.
5717 self.context.holding_cell_update_fee = None;
5718 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5719 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5721 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5722 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5729 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5730 "we can't both complete shutdown and return a monitor update");
5732 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
/// Returns an iterator over all outbound HTLCs currently in flight on this
/// channel: those queued in the holding cell plus those already pending.
5735 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
// Holding-cell entries first; only AddHTLC updates carry a source/payment_hash.
5736 self.context.holding_cell_htlc_updates.iter()
5737 .flat_map(|htlc_update| {
5739 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5740 => Some((source, payment_hash)),
// ...followed by every pending outbound HTLC already in the channel state.
5744 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5748 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5749 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Core channel state shared with funded channels.
5750 pub context: ChannelContext<SP>,
// State that only applies while the channel remains unfunded.
5751 pub unfunded_context: UnfundedChannelContext,
5754 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5755 pub fn new<ES: Deref, F: Deref>(
5756 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5757 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5758 outbound_scid_alias: u64
5759 ) -> Result<OutboundV1Channel<SP>, APIError>
5760 where ES::Target: EntropySource,
5761 F::Target: FeeEstimator
5763 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5764 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5765 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5766 let pubkeys = holder_signer.pubkeys().clone();
5768 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5769 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5771 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5772 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5774 let channel_value_msat = channel_value_satoshis * 1000;
5775 if push_msat > channel_value_msat {
5776 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
5778 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
5779 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
5781 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
5782 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
5783 // Protocol level safety check in place, although it should never happen because
5784 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
5785 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
5788 let channel_type = Self::get_initial_channel_type(&config, their_features);
5789 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
5791 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
5792 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
5794 (ConfirmationTarget::NonAnchorChannelFee, 0)
5796 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
5798 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
5799 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
5800 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
5801 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
5804 let mut secp_ctx = Secp256k1::new();
5805 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
5807 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
5808 match signer_provider.get_shutdown_scriptpubkey() {
5809 Ok(scriptpubkey) => Some(scriptpubkey),
5810 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
5814 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
5815 if !shutdown_scriptpubkey.is_compatible(&their_features) {
5816 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5820 let destination_script = match signer_provider.get_destination_script() {
5821 Ok(script) => script,
5822 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
5825 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
5828 context: ChannelContext {
5831 config: LegacyChannelConfig {
5832 options: config.channel_config.clone(),
5833 announced_channel: config.channel_handshake_config.announced_channel,
5834 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
5839 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
5841 channel_id: temporary_channel_id,
5842 temporary_channel_id: Some(temporary_channel_id),
5843 channel_state: ChannelState::OurInitSent as u32,
5844 announcement_sigs_state: AnnouncementSigsState::NotSent,
5846 channel_value_satoshis,
5848 latest_monitor_update_id: 0,
5850 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
5851 shutdown_scriptpubkey,
5854 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5855 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
5858 pending_inbound_htlcs: Vec::new(),
5859 pending_outbound_htlcs: Vec::new(),
5860 holding_cell_htlc_updates: Vec::new(),
5861 pending_update_fee: None,
5862 holding_cell_update_fee: None,
5863 next_holder_htlc_id: 0,
5864 next_counterparty_htlc_id: 0,
5865 update_time_counter: 1,
5867 resend_order: RAACommitmentOrder::CommitmentFirst,
5869 monitor_pending_channel_ready: false,
5870 monitor_pending_revoke_and_ack: false,
5871 monitor_pending_commitment_signed: false,
5872 monitor_pending_forwards: Vec::new(),
5873 monitor_pending_failures: Vec::new(),
5874 monitor_pending_finalized_fulfills: Vec::new(),
5876 signer_pending_commitment_update: false,
5878 #[cfg(debug_assertions)]
5879 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5880 #[cfg(debug_assertions)]
5881 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
5883 last_sent_closing_fee: None,
5884 pending_counterparty_closing_signed: None,
5885 closing_fee_limits: None,
5886 target_closing_feerate_sats_per_kw: None,
5888 funding_tx_confirmed_in: None,
5889 funding_tx_confirmation_height: 0,
5890 short_channel_id: None,
5891 channel_creation_height: current_chain_height,
5893 feerate_per_kw: commitment_feerate,
5894 counterparty_dust_limit_satoshis: 0,
5895 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
5896 counterparty_max_htlc_value_in_flight_msat: 0,
5897 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
5898 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
5899 holder_selected_channel_reserve_satoshis,
5900 counterparty_htlc_minimum_msat: 0,
5901 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
5902 counterparty_max_accepted_htlcs: 0,
5903 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
5904 minimum_depth: None, // Filled in in accept_channel
5906 counterparty_forwarding_info: None,
5908 channel_transaction_parameters: ChannelTransactionParameters {
5909 holder_pubkeys: pubkeys,
5910 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
5911 is_outbound_from_holder: true,
5912 counterparty_parameters: None,
5913 funding_outpoint: None,
5914 channel_type_features: channel_type.clone()
5916 funding_transaction: None,
5917 is_batch_funding: None,
5919 counterparty_cur_commitment_point: None,
5920 counterparty_prev_commitment_point: None,
5921 counterparty_node_id,
5923 counterparty_shutdown_scriptpubkey: None,
5925 commitment_secrets: CounterpartyCommitmentSecrets::new(),
5927 channel_update_status: ChannelUpdateStatus::Enabled,
5928 closing_signed_in_flight: false,
5930 announcement_sigs: None,
5932 #[cfg(any(test, fuzzing))]
5933 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
5934 #[cfg(any(test, fuzzing))]
5935 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
5937 workaround_lnd_bug_4006: None,
5938 sent_message_awaiting_response: None,
5940 latest_inbound_scid_alias: None,
5941 outbound_scid_alias,
5943 channel_pending_event_emitted: false,
5944 channel_ready_event_emitted: false,
5946 #[cfg(any(test, fuzzing))]
5947 historical_inbound_htlc_fulfills: HashSet::new(),
5952 blocked_monitor_updates: Vec::new(),
5954 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
/// Builds the counterparty's initial commitment transaction and asks our signer for an ECDSA
/// signature over it, for inclusion in the `funding_created` message we send to the peer.
///
5958 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
5959 fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
// Derive the counterparty's per-commitment transaction keys and build their commitment tx at
// the current (still-initial at this point in the handshake) counterparty commitment number.
5960 let counterparty_keys = self.context.build_remote_transaction_keys();
5961 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
5962 match &self.context.holder_signer {
5963 // TODO (taproot|arik): move match into calling method for Taproot
5964 ChannelSignerType::Ecdsa(ecdsa) => {
// No HTLCs exist yet, hence the empty Vec of per-HTLC preimage data. Any signer failure is
// mapped to a ChannelError::Close per the doc comment above; `.0` keeps only the commitment
// signature, discarding the (empty) HTLC signatures.
5965 Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
5966 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5971 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
5972 /// a funding_created message for the remote peer.
5973 /// Panics if called at some time other than immediately after initial handshake, if called twice,
5974 /// or if called on an inbound channel.
5975 /// Note that channel_id changes during this call!
5976 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
5977 /// If an Err is returned, it is a ChannelError::Close.
///
/// On success, consumes `self` and returns the funded `Channel` plus the `FundingCreated`
/// message to send; on failure, hands `self` back to the caller alongside the error.
5978 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
5979 -> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
5980 if !self.context.is_outbound() {
5981 panic!("Tried to create outbound funding_created message on an inbound channel!");
5983 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
5984 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
// Neither side's commitment number may have advanced, and we must not have seen any
// counterparty per-commitment secrets yet (min seen secret is still the 1<<48 sentinel).
5986 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
5987 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
5988 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5989 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
// The signer needs the funding outpoint before it can sign the commitment transaction.
5992 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
5993 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
5995 let signature = match self.get_funding_created_signature(logger) {
5998 log_error!(logger, "Got bad signatures: {:?}!", e);
// Roll back the funding outpoint we set above so the caller can retry cleanly.
5999 self.context.channel_transaction_parameters.funding_outpoint = None;
6000 return Err((self, e));
6004 let temporary_channel_id = self.context.channel_id;
6006 // Now that we're past error-generating stuff, update our local state:
6008 self.context.channel_state = ChannelState::FundingCreated as u32;
// The real channel_id is derived from the funding outpoint; the old (temporary) id was
// captured above for inclusion in the FundingCreated message.
6009 self.context.channel_id = funding_txo.to_channel_id();
6011 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6012 // We can skip this if it is a zero-conf channel.
6013 if funding_transaction.is_coin_base() &&
6014 self.context.minimum_depth.unwrap_or(0) > 0 &&
6015 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6016 self.context.minimum_depth = Some(COINBASE_MATURITY);
6019 self.context.funding_transaction = Some(funding_transaction);
// `Some(())` only when this funding tx is shared by multiple channels (batch funding).
6020 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6022 let channel = Channel {
6023 context: self.context,
6026 Ok((channel, msgs::FundingCreated {
6027 temporary_channel_id,
6028 funding_txid: funding_txo.txid,
6029 funding_output_index: funding_txo.index,
6032 partial_signature_with_nonce: None,
6034 next_local_nonce: None,
/// Picks the channel type we will propose in our `open_channel` message, based on our config
/// preferences and the features the counterparty advertised in their `init` message.
6038 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6039 // The default channel type (ie the first one we try) depends on whether the channel is
6040 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6041 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6042 // with no other changes, and fall back to `only_static_remotekey`.
6043 let mut ret = ChannelTypeFeatures::only_static_remote_key();
// scid_privacy is only proposed for unannounced channels, and only when both our config asks
// for it and the peer advertised support.
6044 if !config.channel_handshake_config.announced_channel &&
6045 config.channel_handshake_config.negotiate_scid_privacy &&
6046 their_features.supports_scid_privacy() {
6047 ret.set_scid_privacy_required();
6050 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6051 // set it now. If they don't understand it, we'll fall back to our default of
6052 // `only_static_remotekey`.
6053 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6054 their_features.supports_anchors_zero_fee_htlc_tx() {
6055 ret.set_anchors_zero_fee_htlc_tx_required();
6061 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6062 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6063 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
///
/// Downgrades our proposed channel type one feature at a time (anchors first, then
/// scid_privacy) and returns a fresh `open_channel` message to retry with, or `Err(())` once
/// we have fallen all the way back to `only_static_remote_key` and have nothing left to try.
6064 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6065 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6066 ) -> Result<msgs::OpenChannel, ()>
6068 F::Target: FeeEstimator
// Retrying only makes sense for an outbound channel still waiting on accept_channel.
6070 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6071 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6072 // We've exhausted our options
6075 // We support opening a few different types of channels. Try removing our additional
6076 // features one by one until we've either arrived at our default or the counterparty has
6079 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6080 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6081 // checks whether the counterparty supports every feature, this would only happen if the
6082 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6084 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6085 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
// Dropping anchors changes the fee model, so re-fetch a feerate for non-anchor channels.
6086 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6087 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6088 } else if self.context.channel_type.supports_scid_privacy() {
6089 self.context.channel_type.clear_scid_privacy();
6091 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
// Keep the transaction-building parameters in sync with the downgraded channel type.
6093 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6094 Ok(self.get_open_channel(chain_hash))
/// Generates the `open_channel` message to send to the counterparty for this outbound channel.
///
/// Panics if called on an inbound channel, after the handshake has moved past `OurInitSent`,
/// or after the holder commitment transaction number has advanced.
6097 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6098 if !self.context.is_outbound() {
6099 panic!("Tried to open a channel for an inbound channel?");
6101 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6102 panic!("Cannot generate an open_channel after we've moved forward");
6105 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6106 panic!("Tried to send an open_channel for a channel that has already advanced");
// Our first per-commitment point comes from the signer; our basepoints from our pubkey set.
6109 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6110 let keys = self.context.get_holder_pubkeys();
6114 temporary_channel_id: self.context.channel_id,
6115 funding_satoshis: self.context.channel_value_satoshis,
// push_msat is whatever portion of the channel value we are NOT keeping for ourselves.
6116 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6117 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6118 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6119 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6120 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6121 feerate_per_kw: self.context.feerate_per_kw as u32,
6122 to_self_delay: self.context.get_holder_selected_contest_delay(),
6123 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6124 funding_pubkey: keys.funding_pubkey,
6125 revocation_basepoint: keys.revocation_basepoint,
6126 payment_point: keys.payment_point,
6127 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6128 htlc_basepoint: keys.htlc_basepoint,
6129 first_per_commitment_point,
// Bit 0 of channel_flags is announce_channel (whether the channel will be made public).
6130 channel_flags: if self.context.config.announced_channel {1} else {0},
// An empty script signals that we are not committing to an upfront shutdown script.
6131 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6132 Some(script) => script.clone().into_inner(),
6133 None => Builder::new().into_script(),
6135 channel_type: Some(self.context.channel_type.clone()),
/// Handles the counterparty's `accept_channel` message on an outbound channel.
///
/// Validates every field against protocol-level sanity limits and our (possibly
/// per-channel-overridden) user-configured handshake limits, then records the counterparty's
/// parameters into our channel state and advances the handshake. Any violation returns a
/// `ChannelError::Close`, failing the channel.
6140 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Per-channel limit overrides (set at open time) take precedence over the global defaults.
6141 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6143 // Check sanity of message fields:
6144 if !self.context.is_outbound() {
6145 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6147 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6148 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
// 21M BTC in satoshis: no dust limit can sanely exceed the total coin supply.
6150 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6151 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6153 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6154 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6156 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6157 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6159 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6160 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6161 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
// The spendable capacity is the channel value less the reserve they demand of us.
6163 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6164 if msg.htlc_minimum_msat >= full_channel_value_msat {
6165 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6167 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6168 if msg.to_self_delay > max_delay_acceptable {
6169 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6171 if msg.max_accepted_htlcs < 1 {
6172 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6174 if msg.max_accepted_htlcs > MAX_HTLCS {
6175 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6178 // Now check against optional parameters as set by config...
6179 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6180 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6182 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6183 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6185 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6186 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6188 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6189 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6191 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6192 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6194 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6195 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6197 if msg.minimum_depth > peer_limits.max_minimum_depth {
6198 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
// If they echoed a channel type, it must match what we proposed; if they support the
// channel_type feature but sent none, we infer a type from their init features, which must
// be our static_remote_key default.
6201 if let Some(ty) = &msg.channel_type {
6202 if *ty != self.context.channel_type {
6203 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6205 } else if their_features.supports_channel_type() {
6206 // Assume they've accepted the channel type as they said they understand it.
6208 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6209 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6210 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6212 self.context.channel_type = channel_type.clone();
6213 self.context.channel_transaction_parameters.channel_type_features = channel_type;
// Validate (and remember) the counterparty's upfront shutdown script, if they signal one.
6216 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6217 match &msg.shutdown_scriptpubkey {
6218 &Some(ref script) => {
6219 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6220 if script.len() == 0 {
6223 if !script::is_bolt2_compliant(&script, their_features) {
6224 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6226 Some(script.clone())
6229 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6231 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed - record the counterparty's parameters in our channel state.
6236 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
// Cap their claimed in-flight limit at the full channel value.
6237 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6238 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6239 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6240 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
// If configured to trust our own funding at 0-conf, accept their minimum_depth verbatim
// (it may be 0); otherwise require at least one confirmation.
6242 if peer_limits.trust_own_funding_0conf {
6243 self.context.minimum_depth = Some(msg.minimum_depth);
6245 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6248 let counterparty_pubkeys = ChannelPublicKeys {
6249 funding_pubkey: msg.funding_pubkey,
6250 revocation_basepoint: msg.revocation_basepoint,
6251 payment_point: msg.payment_point,
6252 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6253 htlc_basepoint: msg.htlc_basepoint
6256 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6257 selected_contest_delay: msg.to_self_delay,
6258 pubkeys: counterparty_pubkeys,
6261 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6262 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
// Handshake complete from a message-exchange perspective; next step is funding_created.
6264 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6265 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6271 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6272 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// State shared with the other channel phases (see `ChannelContext`).
6273 pub context: ChannelContext<SP>,
// State specific to channels that are not yet funded; tracks the channel's age in timer
// ticks (see `UnfundedChannelContext::unfunded_channel_age_ticks`).
6274 pub unfunded_context: UnfundedChannelContext,
6277 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6278 /// Creates a new channel from a remote sides' request for one.
6279 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6280 pub fn new<ES: Deref, F: Deref, L: Deref>(
6281 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6282 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6283 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6284 current_chain_height: u32, logger: &L, is_0conf: bool,
6285 ) -> Result<InboundV1Channel<SP>, ChannelError>
6286 where ES::Target: EntropySource,
6287 F::Target: FeeEstimator,
6290 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6292 // First check the channel type is known, failing before we do anything else if we don't
6293 // support this channel type.
6294 let channel_type = if let Some(channel_type) = &msg.channel_type {
6295 if channel_type.supports_any_optional_bits() {
6296 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6299 // We only support the channel types defined by the `ChannelManager` in
6300 // `provided_channel_type_features`. The channel type must always support
6301 // `static_remote_key`.
6302 if !channel_type.requires_static_remote_key() {
6303 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6305 // Make sure we support all of the features behind the channel type.
6306 if !channel_type.is_subset(our_supported_features) {
6307 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6309 if channel_type.requires_scid_privacy() && announced_channel {
6310 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6312 channel_type.clone()
6314 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6315 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6316 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6321 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6322 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6323 let pubkeys = holder_signer.pubkeys().clone();
6324 let counterparty_pubkeys = ChannelPublicKeys {
6325 funding_pubkey: msg.funding_pubkey,
6326 revocation_basepoint: msg.revocation_basepoint,
6327 payment_point: msg.payment_point,
6328 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6329 htlc_basepoint: msg.htlc_basepoint
6332 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6333 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6336 // Check sanity of message fields:
6337 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6338 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6340 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6341 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6343 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6344 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6346 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6347 if msg.push_msat > full_channel_value_msat {
6348 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6350 if msg.dust_limit_satoshis > msg.funding_satoshis {
6351 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6353 if msg.htlc_minimum_msat >= full_channel_value_msat {
6354 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6356 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6358 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6359 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6360 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6362 if msg.max_accepted_htlcs < 1 {
6363 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6365 if msg.max_accepted_htlcs > MAX_HTLCS {
6366 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6369 // Now check against optional parameters as set by config...
6370 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6371 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6373 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6374 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6376 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6377 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6379 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6380 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6382 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6383 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6385 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6386 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6388 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6389 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6392 // Convert things into internal flags and prep our state:
6394 if config.channel_handshake_limits.force_announced_channel_preference {
6395 if config.channel_handshake_config.announced_channel != announced_channel {
6396 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6400 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6401 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6402 // Protocol level safety check in place, although it should never happen because
6403 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6404 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6406 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6407 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6409 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6410 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6411 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6413 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6414 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6417 // check if the funder's amount for the initial commitment tx is sufficient
6418 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6419 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6420 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6424 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6425 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6426 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6427 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6430 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6431 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6432 // want to push much to us), our counterparty should always have more than our reserve.
6433 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6434 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6437 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6438 match &msg.shutdown_scriptpubkey {
6439 &Some(ref script) => {
6440 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6441 if script.len() == 0 {
6444 if !script::is_bolt2_compliant(&script, their_features) {
6445 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6447 Some(script.clone())
6450 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6452 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6457 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6458 match signer_provider.get_shutdown_scriptpubkey() {
6459 Ok(scriptpubkey) => Some(scriptpubkey),
6460 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6464 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6465 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6466 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6470 let destination_script = match signer_provider.get_destination_script() {
6471 Ok(script) => script,
6472 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6475 let mut secp_ctx = Secp256k1::new();
6476 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6478 let minimum_depth = if is_0conf {
6481 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6485 context: ChannelContext {
6488 config: LegacyChannelConfig {
6489 options: config.channel_config.clone(),
6491 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6496 inbound_handshake_limits_override: None,
6498 temporary_channel_id: Some(msg.temporary_channel_id),
6499 channel_id: msg.temporary_channel_id,
6500 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6501 announcement_sigs_state: AnnouncementSigsState::NotSent,
6504 latest_monitor_update_id: 0,
6506 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6507 shutdown_scriptpubkey,
6510 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6511 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6512 value_to_self_msat: msg.push_msat,
6514 pending_inbound_htlcs: Vec::new(),
6515 pending_outbound_htlcs: Vec::new(),
6516 holding_cell_htlc_updates: Vec::new(),
6517 pending_update_fee: None,
6518 holding_cell_update_fee: None,
6519 next_holder_htlc_id: 0,
6520 next_counterparty_htlc_id: 0,
6521 update_time_counter: 1,
6523 resend_order: RAACommitmentOrder::CommitmentFirst,
6525 monitor_pending_channel_ready: false,
6526 monitor_pending_revoke_and_ack: false,
6527 monitor_pending_commitment_signed: false,
6528 monitor_pending_forwards: Vec::new(),
6529 monitor_pending_failures: Vec::new(),
6530 monitor_pending_finalized_fulfills: Vec::new(),
6532 signer_pending_commitment_update: false,
6534 #[cfg(debug_assertions)]
6535 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6536 #[cfg(debug_assertions)]
6537 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6539 last_sent_closing_fee: None,
6540 pending_counterparty_closing_signed: None,
6541 closing_fee_limits: None,
6542 target_closing_feerate_sats_per_kw: None,
6544 funding_tx_confirmed_in: None,
6545 funding_tx_confirmation_height: 0,
6546 short_channel_id: None,
6547 channel_creation_height: current_chain_height,
6549 feerate_per_kw: msg.feerate_per_kw,
6550 channel_value_satoshis: msg.funding_satoshis,
6551 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6552 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6553 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6554 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6555 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6556 holder_selected_channel_reserve_satoshis,
6557 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6558 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6559 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6560 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6563 counterparty_forwarding_info: None,
6565 channel_transaction_parameters: ChannelTransactionParameters {
6566 holder_pubkeys: pubkeys,
6567 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6568 is_outbound_from_holder: false,
6569 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6570 selected_contest_delay: msg.to_self_delay,
6571 pubkeys: counterparty_pubkeys,
6573 funding_outpoint: None,
6574 channel_type_features: channel_type.clone()
6576 funding_transaction: None,
6577 is_batch_funding: None,
6579 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6580 counterparty_prev_commitment_point: None,
6581 counterparty_node_id,
6583 counterparty_shutdown_scriptpubkey,
6585 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6587 channel_update_status: ChannelUpdateStatus::Enabled,
6588 closing_signed_in_flight: false,
6590 announcement_sigs: None,
6592 #[cfg(any(test, fuzzing))]
6593 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6594 #[cfg(any(test, fuzzing))]
6595 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6597 workaround_lnd_bug_4006: None,
6598 sent_message_awaiting_response: None,
6600 latest_inbound_scid_alias: None,
6601 outbound_scid_alias: 0,
6603 channel_pending_event_emitted: false,
6604 channel_ready_event_emitted: false,
6606 #[cfg(any(test, fuzzing))]
6607 historical_inbound_htlc_fulfills: HashSet::new(),
6612 blocked_monitor_updates: Vec::new(),
6614 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6620 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6621 /// should be sent back to the counterparty node.
6623 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6624 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6625 if self.context.is_outbound() {
6626 panic!("Tried to send accept_channel for an outbound channel?");
6628 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6629 panic!("Tried to send accept_channel after channel had moved forward");
6631 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6632 panic!("Tried to send an accept_channel for a channel that has already advanced");
6635 self.generate_accept_channel_message()
6638 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6639 /// inbound channel. If the intention is to accept an inbound channel, use
6640 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6642 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6643 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
// Derive the first per-commitment point from our signer so the counterparty can build
// our initial commitment transaction.
6644 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx)
6645 let keys = self.context.get_holder_pubkeys();
// Populate the accept_channel message from our side's negotiated limits and basepoints.
6647 msgs::AcceptChannel {
6648 temporary_channel_id: self.context.channel_id,
6649 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6650 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6651 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6652 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
// NOTE(review): `unwrap()` assumes `minimum_depth` was set during channel construction
// (inbound channels set it in `new`) — confirm no call path reaches here before that.
6653 minimum_depth: self.context.minimum_depth.unwrap(),
6654 to_self_delay: self.context.get_holder_selected_contest_delay(),
6655 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6656 funding_pubkey: keys.funding_pubkey,
6657 revocation_basepoint: keys.revocation_basepoint,
6658 payment_point: keys.payment_point,
6659 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6660 htlc_basepoint: keys.htlc_basepoint,
6661 first_per_commitment_point,
// An empty script signals opt-out of upfront shutdown when we have no script committed.
6662 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6663 Some(script) => script.clone().into_inner(),
6664 None => Builder::new().into_script(),
6666 channel_type: Some(self.context.channel_type.clone()),
6668 next_local_nonce: None,
6672 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6673 /// inbound channel without accepting it.
6675 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6677 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
// Takes `&self` and delegates directly: unlike `accept_inbound_channel` this performs
// no state checks and does not mutate the channel.
6678 self.generate_accept_channel_message()
/// Verifies the counterparty's `funding_created` signature over our initial holder
/// commitment transaction and, if valid, signs their initial commitment transaction.
///
/// Returns `(counterparty_initial_commitment_tx, holder_initial_commitment_tx, signature)`
/// where `signature` is ours over the counterparty's commitment; returns
/// `ChannelError::Close` if the peer's signature is invalid or our signer fails.
6681 fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
6682 let funding_script = self.context.get_funding_redeemscript();
// Build our own (holder) initial commitment transaction so we can check their signature.
6684 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6685 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6687 let trusted_tx = initial_commitment_tx.trust();
6688 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6689 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6690 // They sign the holder commitment transaction...
6691 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6692 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6693 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6694 encode::serialize_hex(&funding_script), &self.context.channel_id());
// secp_check! maps a failed ECDSA verification into a ChannelError::Close.
6695 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
// Now build the counterparty's initial commitment transaction, which we must sign.
6698 let counterparty_keys = self.context.build_remote_transaction_keys();
6699 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6701 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6702 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6703 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6704 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6706 match &self.context.holder_signer {
6707 // TODO (arik): move match into calling method for Taproot
6708 ChannelSignerType::Ecdsa(ecdsa) => {
6709 let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
6710 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
6712 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
6713 Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
/// Handles an incoming `funding_created` message: validates the peer's signature, creates
/// the `ChannelMonitor`, and promotes this inbound channel into a funded [`Channel`].
///
/// Consumes `self`; on failure returns `(self, ChannelError)` so the caller can keep or
/// drop the unfunded channel as appropriate. On success returns the promoted channel, the
/// `funding_signed` reply to send, and the new monitor to persist.
6718 pub fn funding_created<L: Deref>(
6719 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6720 ) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
// -- Sanity checks on direction, state, and commitment-number invariants --
6724 if self.context.is_outbound() {
6725 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6727 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6728 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6729 // remember the channel, so it's safe to just send an error_message here and drop the
6731 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6733 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6734 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6735 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6736 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
// Record the funding outpoint from the message and hand the final channel parameters to
// our signer; this must happen before we can build/verify commitment transactions.
6739 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6740 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6741 // This is an externally observable change before we finish all our checks. In particular
6742 // funding_created_signature may fail.
6743 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6745 let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
6747 Err(ChannelError::Close(e)) => {
// Roll back the funding outpoint we set above so the channel state stays consistent.
6748 self.context.channel_transaction_parameters.funding_outpoint = None;
6749 return Err((self, ChannelError::Close(e)));
6752 // The only error we know how to handle is ChannelError::Close, so we fall over here
6753 // to make sure we don't continue with an inconsistent state.
6754 panic!("unexpected error type from funding_created_signature {:?}", e);
6758 let holder_commitment_tx = HolderCommitmentTransaction::new(
6759 initial_commitment_tx,
6762 &self.context.get_holder_pubkeys().funding_pubkey,
6763 self.context.counterparty_funding_pubkey()
// Give our signer a chance to reject the commitment before we commit any state.
6766 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6767 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6770 // Now that we're past error-generating stuff, update our local state:
6772 let funding_redeemscript = self.context.get_funding_redeemscript();
6773 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6774 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6775 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
// Build a dedicated signer for the monitor; it gets its own copy of the channel parameters.
6776 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6777 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6778 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6779 shutdown_script, self.context.get_holder_selected_contest_delay(),
6780 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6781 &self.context.channel_transaction_parameters,
6782 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6784 holder_commitment_tx, best_block, self.context.counterparty_node_id);
// Seed the monitor with the counterparty's initial commitment so it can react to a
// unilateral broadcast from the very start.
6786 channel_monitor.provide_initial_counterparty_commitment_tx(
6787 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6788 self.context.cur_counterparty_commitment_transaction_number,
6789 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6790 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6791 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
// Advance state: the channel id becomes funding-outpoint-derived and both commitment
// transaction counters step past the initial commitment.
6793 self.context.channel_state = ChannelState::FundingSent as u32;
6794 self.context.channel_id = funding_txo.to_channel_id();
6795 self.context.cur_counterparty_commitment_transaction_number -= 1;
6796 self.context.cur_holder_commitment_transaction_number -= 1;
6798 log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
6800 // Promote the channel to a full-fledged one now that we have updated the state and have a
6801 // `ChannelMonitor`.
6802 let mut channel = Channel {
6803 context: self.context,
6805 let channel_id = channel.context.channel_id.clone();
6806 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6807 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6809 Ok((channel, msgs::FundingSigned {
6813 partial_signature_with_nonce: None,
6814 }, channel_monitor))
// Version written into the serialization prefix for freshly-written channels.
6818 const SERIALIZATION_VERSION: u8 = 3;
// Oldest on-disk version this code can still read back (see the `read_ver_prefix!` call
// in the deserializer below).
6819 const MIN_SERIALIZATION_VERSION: u8 = 2;
// Derive Writeable/Readable for the enum via the project's TLV-based enum macro.
6821 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6827 impl Writeable for ChannelUpdateStatus {
6828 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6829 // We only care about writing out the current state as it was announced, ie only either
6830 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6831 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
// A single byte is written: 0 maps back to Enabled, 1 to Disabled on read.
6833 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6834 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6835 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6836 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6842 impl Readable for ChannelUpdateStatus {
6843 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6844 Ok(match <u8 as Readable>::read(reader)? {
6845 0 => ChannelUpdateStatus::Enabled,
6846 1 => ChannelUpdateStatus::Disabled,
6847 _ => return Err(DecodeError::InvalidValue),
6852 impl Writeable for AnnouncementSigsState {
6853 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6854 // We only care about writing out the current state as if we had just disconnected, at
6855 // which point we always set anything but AnnouncementSigsReceived to NotSent.
// One byte on the wire: 0 reads back as NotSent, 1 as PeerReceived.
6857 AnnouncementSigsState::NotSent => 0u8.write(writer),
6858 AnnouncementSigsState::MessageSent => 0u8.write(writer),
6859 AnnouncementSigsState::Committed => 0u8.write(writer),
6860 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6865 impl Readable for AnnouncementSigsState {
6866 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6867 Ok(match <u8 as Readable>::read(reader)? {
6868 0 => AnnouncementSigsState::NotSent,
6869 1 => AnnouncementSigsState::PeerReceived,
6870 _ => return Err(DecodeError::InvalidValue),
6875 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
// Serializes the full channel state: a fixed legacy field layout first (readable by
// MIN_SERIALIZATION_VERSION deserializers), followed by TLV extensions at the end.
// Field ORDER here is the wire format — do not reorder.
6876 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6877 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6880 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6882 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6883 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6884 // the low bytes now and the optional high bytes later.
6885 let user_id_low = self.context.user_id as u64;
6886 user_id_low.write(writer)?;
6888 // Version 1 deserializers expected to read parts of the config object here. Version 2
6889 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6890 // `minimum_depth` we simply write dummy values here.
6891 writer.write_all(&[0; 8])?;
6893 self.context.channel_id.write(writer)?;
// PeerDisconnected is OR'd in so the state reads back as if the peer had just disconnected.
6894 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6895 self.context.channel_value_satoshis.write(writer)?;
6897 self.context.latest_monitor_update_id.write(writer)?;
// Serialize the signer into a temporary buffer so we can length-prefix it.
6899 let mut key_data = VecWriter(Vec::new());
6900 // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
6901 self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
6902 assert!(key_data.0.len() < core::usize::MAX);
6903 assert!(key_data.0.len() < core::u32::MAX as usize);
6904 (key_data.0.len() as u32).write(writer)?;
6905 writer.write_all(&key_data.0[..])?;
6907 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6908 // deserialized from that format.
6909 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6910 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6911 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6913 self.context.destination_script.write(writer)?;
6915 self.context.cur_holder_commitment_transaction_number.write(writer)?;
6916 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
6917 self.context.value_to_self_msat.write(writer)?;
// Inbound HTLCs the peer announced but never committed are dropped entirely: count them
// here, subtract them from the written length, and skip them in the loop below.
6919 let mut dropped_inbound_htlcs = 0;
6920 for htlc in self.context.pending_inbound_htlcs.iter() {
6921 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
6922 dropped_inbound_htlcs += 1;
6925 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
6926 for htlc in self.context.pending_inbound_htlcs.iter() {
6927 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
6930 htlc.htlc_id.write(writer)?;
6931 htlc.amount_msat.write(writer)?;
6932 htlc.cltv_expiry.write(writer)?;
6933 htlc.payment_hash.write(writer)?;
6935 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
6936 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
6938 htlc_state.write(writer)?;
6940 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
6942 htlc_state.write(writer)?;
6944 &InboundHTLCState::Committed => {
6947 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
6949 removal_reason.write(writer)?;
// Outbound HTLCs. `preimages` collects claimed-HTLC preimages for TLV type 15 below;
// `pending_outbound_skimmed_fees` is built sparsely and only if any fee was skimmed.
6954 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
6955 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
6957 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
6958 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
6959 htlc.htlc_id.write(writer)?;
6960 htlc.amount_msat.write(writer)?;
6961 htlc.cltv_expiry.write(writer)?;
6962 htlc.payment_hash.write(writer)?;
6963 htlc.source.write(writer)?;
6965 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
6967 onion_packet.write(writer)?;
6969 &OutboundHTLCState::Committed => {
6972 &OutboundHTLCState::RemoteRemoved(_) => {
6973 // Treat this as a Committed because we haven't received the CS - they'll
6974 // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
6977 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
6979 if let OutboundHTLCOutcome::Success(preimage) = outcome {
6980 preimages.push(preimage);
6982 let reason: Option<&HTLCFailReason> = outcome.into();
6983 reason.write(writer)?;
6985 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
6987 if let OutboundHTLCOutcome::Success(preimage) = outcome {
6988 preimages.push(preimage);
6990 let reason: Option<&HTLCFailReason> = outcome.into();
6991 reason.write(writer)?;
// Backfill Nones for earlier HTLCs the first time we see a skimmed fee, so vector
// indices stay aligned with the HTLC list written above.
6994 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
6995 if pending_outbound_skimmed_fees.is_empty() {
6996 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
6998 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
6999 } else if !pending_outbound_skimmed_fees.is_empty() {
7000 pending_outbound_skimmed_fees.push(None);
// Holding-cell updates (HTLC adds/claims/fails queued while awaiting a commitment dance).
7004 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7005 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7006 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7008 &HTLCUpdateAwaitingACK::AddHTLC {
7009 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7013 amount_msat.write(writer)?;
7014 cltv_expiry.write(writer)?;
7015 payment_hash.write(writer)?;
7016 source.write(writer)?;
7017 onion_routing_packet.write(writer)?;
7019 if let Some(skimmed_fee) = skimmed_fee_msat {
7020 if holding_cell_skimmed_fees.is_empty() {
7021 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7023 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7024 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7026 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7028 payment_preimage.write(writer)?;
7029 htlc_id.write(writer)?;
7031 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7033 htlc_id.write(writer)?;
7034 err_packet.write(writer)?;
// Resend order is encoded as a single byte: 0 = commitment first, 1 = RAA first.
7039 match self.context.resend_order {
7040 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7041 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7044 self.context.monitor_pending_channel_ready.write(writer)?;
7045 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7046 self.context.monitor_pending_commitment_signed.write(writer)?;
7048 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7049 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7050 pending_forward.write(writer)?;
7051 htlc_id.write(writer)?;
7054 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7055 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7056 htlc_source.write(writer)?;
7057 payment_hash.write(writer)?;
7058 fail_reason.write(writer)?;
// Pending fee update: outbound channels persist theirs; inbound ones only keep a fee
// update the peer has committed to (not one that was merely announced).
7061 if self.context.is_outbound() {
7062 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7063 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7064 Some(feerate).write(writer)?;
7066 // As for inbound HTLCs, if the update was only announced and never committed in a
7067 // commitment_signed, drop it.
7068 None::<u32>.write(writer)?;
7070 self.context.holding_cell_update_fee.write(writer)?;
7072 self.context.next_holder_htlc_id.write(writer)?;
// Subtract the dropped (never-committed) inbound HTLCs counted above.
7073 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7074 self.context.update_time_counter.write(writer)?;
7075 self.context.feerate_per_kw.write(writer)?;
7077 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7078 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7079 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7080 // consider the stale state on reload.
7083 self.context.funding_tx_confirmed_in.write(writer)?;
7084 self.context.funding_tx_confirmation_height.write(writer)?;
7085 self.context.short_channel_id.write(writer)?;
7087 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7088 self.context.holder_dust_limit_satoshis.write(writer)?;
7089 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7091 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7092 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7094 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7095 self.context.holder_htlc_minimum_msat.write(writer)?;
7096 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7098 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7099 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7101 match &self.context.counterparty_forwarding_info {
7104 info.fee_base_msat.write(writer)?;
7105 info.fee_proportional_millionths.write(writer)?;
7106 info.cltv_expiry_delta.write(writer)?;
7108 None => 0u8.write(writer)?
7111 self.context.channel_transaction_parameters.write(writer)?;
7112 self.context.funding_transaction.write(writer)?;
7114 self.context.counterparty_cur_commitment_point.write(writer)?;
7115 self.context.counterparty_prev_commitment_point.write(writer)?;
7116 self.context.counterparty_node_id.write(writer)?;
7118 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7120 self.context.commitment_secrets.write(writer)?;
7122 self.context.channel_update_status.write(writer)?;
7124 #[cfg(any(test, fuzzing))]
7125 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7126 #[cfg(any(test, fuzzing))]
7127 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7128 htlc.write(writer)?;
7131 // If the channel type is something other than only-static-remote-key, then we need to have
7132 // older clients fail to deserialize this channel at all. If the type is
7133 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7135 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7136 Some(&self.context.channel_type) } else { None };
7138 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7139 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7140 // a different percentage of the channel value then 10%, which older versions of LDK used
7141 // to set it to before the percentage was made configurable.
7142 let serialized_holder_selected_reserve =
7143 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7144 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7146 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7147 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7148 let serialized_holder_htlc_max_in_flight =
7149 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7150 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7152 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7153 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7155 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7156 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7157 // we write the high bytes as an option here.
7158 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7160 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
// TLV tail: numbers here are wire-format type ids and must never be reused or reordered.
7162 write_tlv_fields!(writer, {
7163 (0, self.context.announcement_sigs, option),
7164 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7165 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7166 // them twice, once with their original default values above, and once as an option
7167 // here. On the read side, old versions will simply ignore the odd-type entries here,
7168 // and new versions map the default values to None and allow the TLV entries here to
7170 (1, self.context.minimum_depth, option),
7171 (2, chan_type, option),
7172 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7173 (4, serialized_holder_selected_reserve, option),
7174 (5, self.context.config, required),
7175 (6, serialized_holder_htlc_max_in_flight, option),
7176 (7, self.context.shutdown_scriptpubkey, option),
7177 (8, self.context.blocked_monitor_updates, optional_vec),
7178 (9, self.context.target_closing_feerate_sats_per_kw, option),
7179 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7180 (13, self.context.channel_creation_height, required),
7181 (15, preimages, required_vec),
7182 (17, self.context.announcement_sigs_state, required),
7183 (19, self.context.latest_inbound_scid_alias, option),
7184 (21, self.context.outbound_scid_alias, required),
7185 (23, channel_ready_event_emitted, option),
7186 (25, user_id_high_opt, option),
7187 (27, self.context.channel_keys_id, required),
7188 (28, holder_max_accepted_htlcs, option),
7189 (29, self.context.temporary_channel_id, option),
7190 (31, channel_pending_event_emitted, option),
7191 (35, pending_outbound_skimmed_fees, optional_vec),
7192 (37, holding_cell_skimmed_fees, optional_vec),
7193 (38, self.context.is_batch_funding, option),
// Upper bound on any single up-front buffer allocation during deserialization, so a
// corrupted length prefix cannot make us allocate gigabytes (see the capped
// `Vec::with_capacity` in the channel reader).
7200 const MAX_ALLOC_SIZE: usize = 64*1024;
// Deserialization of a `Channel` previously written by `Writeable::write`. The args tuple is
// (entropy source, signer provider, block height at read time, our supported channel-type
// features). Field order here MUST exactly mirror the write side: legacy fixed-position
// fields first (kept for pre-TLV compatibility), then a TLV stream that may override the
// legacy defaults.
7201 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7203 ES::Target: EntropySource,
7204 SP::Target: SignerProvider
7206 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7207 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7208 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7210 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7211 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7212 // the low bytes now and the high bytes later.
7213 let user_id_low: u64 = Readable::read(reader)?;
7215 let mut config = Some(LegacyChannelConfig::default());
7217 // Read the old serialization of the ChannelConfig from version 0.0.98.
7218 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7219 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7220 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7221 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7223 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7224 let mut _val: u64 = Readable::read(reader)?;
7227 let channel_id = Readable::read(reader)?;
7228 let channel_state = Readable::read(reader)?;
7229 let channel_value_satoshis = Readable::read(reader)?;
7231 let latest_monitor_update_id = Readable::read(reader)?;
7233 let mut keys_data = None;
7235 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7236 // the `channel_keys_id` TLV is present below.
7237 let keys_len: u32 = Readable::read(reader)?;
7238 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7239 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7240 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7241 let mut data = [0; 1024];
7242 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7243 reader.read_exact(read_slice)?;
7244 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7248 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7249 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7250 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7253 let destination_script = Readable::read(reader)?;
7255 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7256 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7257 let value_to_self_msat = Readable::read(reader)?;
// Pending inbound HTLCs: a count followed by that many records. Capacity is clamped to
// DEFAULT_MAX_HTLCS so a bogus count can't force a huge allocation before the reads fail.
7259 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7261 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7262 for _ in 0..pending_inbound_htlc_count {
7263 pending_inbound_htlcs.push(InboundHTLCOutput {
7264 htlc_id: Readable::read(reader)?,
7265 amount_msat: Readable::read(reader)?,
7266 cltv_expiry: Readable::read(reader)?,
7267 payment_hash: Readable::read(reader)?,
// State is a discriminant byte followed by variant-specific payload; unknown bytes are a
// hard decode failure.
7268 state: match <u8 as Readable>::read(reader)? {
7269 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7270 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7271 3 => InboundHTLCState::Committed,
7272 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7273 _ => return Err(DecodeError::InvalidValue),
// Pending outbound HTLCs, same count-then-records shape as the inbound set above.
7278 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7279 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7280 for _ in 0..pending_outbound_htlc_count {
7281 pending_outbound_htlcs.push(OutboundHTLCOutput {
7282 htlc_id: Readable::read(reader)?,
7283 amount_msat: Readable::read(reader)?,
7284 cltv_expiry: Readable::read(reader)?,
7285 payment_hash: Readable::read(reader)?,
7286 source: Readable::read(reader)?,
7287 state: match <u8 as Readable>::read(reader)? {
7288 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7289 1 => OutboundHTLCState::Committed,
7291 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7292 OutboundHTLCState::RemoteRemoved(option.into())
7295 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7296 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7299 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7300 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7302 _ => return Err(DecodeError::InvalidValue),
// Skimmed fees were added later; default to None here and fill in from TLV 35 below.
7304 skimmed_fee_msat: None,
7308 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7309 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7310 for _ in 0..holding_cell_htlc_update_count {
7311 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7312 0 => HTLCUpdateAwaitingACK::AddHTLC {
7313 amount_msat: Readable::read(reader)?,
7314 cltv_expiry: Readable::read(reader)?,
7315 payment_hash: Readable::read(reader)?,
7316 source: Readable::read(reader)?,
7317 onion_routing_packet: Readable::read(reader)?,
// As above: holding-cell skimmed fees come from TLV 37 below, not the legacy stream.
7318 skimmed_fee_msat: None,
7320 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7321 payment_preimage: Readable::read(reader)?,
7322 htlc_id: Readable::read(reader)?,
7324 2 => HTLCUpdateAwaitingACK::FailHTLC {
7325 htlc_id: Readable::read(reader)?,
7326 err_packet: Readable::read(reader)?,
7328 _ => return Err(DecodeError::InvalidValue),
7332 let resend_order = match <u8 as Readable>::read(reader)? {
7333 0 => RAACommitmentOrder::CommitmentFirst,
7334 1 => RAACommitmentOrder::RevokeAndACKFirst,
7335 _ => return Err(DecodeError::InvalidValue),
7338 let monitor_pending_channel_ready = Readable::read(reader)?;
7339 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7340 let monitor_pending_commitment_signed = Readable::read(reader)?;
7342 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7343 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7344 for _ in 0..monitor_pending_forwards_count {
7345 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7348 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7349 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7350 for _ in 0..monitor_pending_failures_count {
7351 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
// Only the feerate is serialized; the FeeUpdateState half of the pair is reconstructed
// further down once we know the channel direction.
7354 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7356 let holding_cell_update_fee = Readable::read(reader)?;
7358 let next_holder_htlc_id = Readable::read(reader)?;
7359 let next_counterparty_htlc_id = Readable::read(reader)?;
7360 let update_time_counter = Readable::read(reader)?;
7361 let feerate_per_kw = Readable::read(reader)?;
7363 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7364 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7365 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7366 // consider the stale state on reload.
7367 match <u8 as Readable>::read(reader)? {
7370 let _: u32 = Readable::read(reader)?;
7371 let _: u64 = Readable::read(reader)?;
7372 let _: Signature = Readable::read(reader)?;
7374 _ => return Err(DecodeError::InvalidValue),
7377 let funding_tx_confirmed_in = Readable::read(reader)?;
7378 let funding_tx_confirmation_height = Readable::read(reader)?;
7379 let short_channel_id = Readable::read(reader)?;
7381 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7382 let holder_dust_limit_satoshis = Readable::read(reader)?;
7383 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7384 let mut counterparty_selected_channel_reserve_satoshis = None;
7386 // Read the old serialization from version 0.0.98.
7387 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7389 // Read the 8 bytes of backwards-compatibility data.
7390 let _dummy: u64 = Readable::read(reader)?;
7392 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7393 let holder_htlc_minimum_msat = Readable::read(reader)?;
7394 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7396 let mut minimum_depth = None;
7398 // Read the old serialization from version 0.0.98.
7399 minimum_depth = Some(Readable::read(reader)?);
7401 // Read the 4 bytes of backwards-compatibility data.
7402 let _dummy: u32 = Readable::read(reader)?;
7405 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7407 1 => Some(CounterpartyForwardingInfo {
7408 fee_base_msat: Readable::read(reader)?,
7409 fee_proportional_millionths: Readable::read(reader)?,
7410 cltv_expiry_delta: Readable::read(reader)?,
7412 _ => return Err(DecodeError::InvalidValue),
// Mutable because channel_type_features is overridden after the TLV stream is read.
7415 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7416 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7418 let counterparty_cur_commitment_point = Readable::read(reader)?;
7420 let counterparty_prev_commitment_point = Readable::read(reader)?;
7421 let counterparty_node_id = Readable::read(reader)?;
7423 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7424 let commitment_secrets = Readable::read(reader)?;
7426 let channel_update_status = Readable::read(reader)?;
7428 #[cfg(any(test, fuzzing))]
7429 let mut historical_inbound_htlc_fulfills = HashSet::new();
7430 #[cfg(any(test, fuzzing))]
7432 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7433 for _ in 0..htlc_fulfills_len {
// Duplicate entries indicate corrupted (or non-canonical) data; test/fuzz builds only.
7434 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
// Rebuild the (feerate, state) pair: only the feerate was serialized, and which side
// initiated the update is implied by whether we funded the channel.
7438 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7439 Some((feerate, if channel_parameters.is_outbound_from_holder {
7440 FeeUpdateState::Outbound
7442 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
// Defaults for fields introduced after the legacy format; the TLV stream below may
// overwrite any of them when the corresponding entry is present.
7448 let mut announcement_sigs = None;
7449 let mut target_closing_feerate_sats_per_kw = None;
7450 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7451 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7452 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7453 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7454 // only, so we default to that if none was written.
7455 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7456 let mut channel_creation_height = Some(serialized_height);
7457 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7459 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7460 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7461 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7462 let mut latest_inbound_scid_alias = None;
7463 let mut outbound_scid_alias = None;
7464 let mut channel_pending_event_emitted = None;
7465 let mut channel_ready_event_emitted = None;
7467 let mut user_id_high_opt: Option<u64> = None;
7468 let mut channel_keys_id: Option<[u8; 32]> = None;
7469 let mut temporary_channel_id: Option<ChannelId> = None;
7470 let mut holder_max_accepted_htlcs: Option<u16> = None;
7472 let mut blocked_monitor_updates = Some(Vec::new());
7474 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7475 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7477 let mut is_batch_funding: Option<()> = None;
// TLV types/ordering must stay in sync with the write side; odd types are optional for
// old readers, even types duplicate legacy fields so both old and new readers agree.
7479 read_tlv_fields!(reader, {
7480 (0, announcement_sigs, option),
7481 (1, minimum_depth, option),
7482 (2, channel_type, option),
7483 (3, counterparty_selected_channel_reserve_satoshis, option),
7484 (4, holder_selected_channel_reserve_satoshis, option),
7485 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7486 (6, holder_max_htlc_value_in_flight_msat, option),
7487 (7, shutdown_scriptpubkey, option),
7488 (8, blocked_monitor_updates, optional_vec),
7489 (9, target_closing_feerate_sats_per_kw, option),
7490 (11, monitor_pending_finalized_fulfills, optional_vec),
7491 (13, channel_creation_height, option),
7492 (15, preimages_opt, optional_vec),
7493 (17, announcement_sigs_state, option),
7494 (19, latest_inbound_scid_alias, option),
7495 (21, outbound_scid_alias, option),
7496 (23, channel_ready_event_emitted, option),
7497 (25, user_id_high_opt, option),
7498 (27, channel_keys_id, option),
7499 (28, holder_max_accepted_htlcs, option),
7500 (29, temporary_channel_id, option),
7501 (31, channel_pending_event_emitted, option),
7502 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7503 (37, holding_cell_skimmed_fees_opt, optional_vec),
7504 (38, is_batch_funding, option),
// Prefer deriving the signer from `channel_keys_id` (newer format); otherwise fall back
// to deserializing the raw signer bytes read near the top.
7507 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7508 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7509 // If we've gotten to the funding stage of the channel, populate the signer with its
7510 // required channel parameters.
7511 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7512 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7513 holder_signer.provide_channel_parameters(&channel_parameters);
7515 (channel_keys_id, holder_signer)
7517 // `keys_data` can be `None` if we had corrupted data.
7518 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7519 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7520 (holder_signer.channel_keys_id(), holder_signer)
// Claimed-HTLC preimages are serialized separately (TLV 15) in the order of the
// success-outcome outbound HTLCs; re-attach them, one per matching HTLC state.
7523 if let Some(preimages) = preimages_opt {
7524 let mut iter = preimages.into_iter();
7525 for htlc in pending_outbound_htlcs.iter_mut() {
7527 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7528 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7530 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7531 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7536 // We expect all preimages to be consumed above
7537 if iter.next().is_some() {
7538 return Err(DecodeError::InvalidValue);
7542 let chan_features = channel_type.as_ref().unwrap();
7543 if !chan_features.is_subset(our_supported_features) {
7544 // If the channel was written by a new version and negotiated with features we don't
7545 // understand yet, refuse to read it.
7546 return Err(DecodeError::UnknownRequiredFeature);
7549 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7550 // To account for that, we're proactively setting/overriding the field here.
7551 channel_parameters.channel_type_features = chan_features.clone();
7553 let mut secp_ctx = Secp256k1::new();
7554 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7556 // `user_id` used to be a single u64 value. In order to remain backwards
7557 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7558 // separate u64 values.
7559 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7561 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
// Skimmed fees (TLV 35/37) are serialized positionally, one entry per HTLC; a length
// mismatch in either direction is a decode error.
7563 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7564 let mut iter = skimmed_fees.into_iter();
7565 for htlc in pending_outbound_htlcs.iter_mut() {
7566 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7568 // We expect all skimmed fees to be consumed above
7569 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7571 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7572 let mut iter = skimmed_fees.into_iter();
7573 for htlc in holding_cell_htlc_updates.iter_mut() {
7574 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7575 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7578 // We expect all skimmed fees to be consumed above
7579 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
// Assemble the Channel. The `.unwrap()`s below are on locals that were initialized to
// `Some(..)` above (legacy reads or defaults) and at most overwritten by the TLV stream,
// so they cannot be `None` here.
7583 context: ChannelContext {
7586 config: config.unwrap(),
7590 // Note that we don't care about serializing handshake limits as we only ever serialize
7591 // channel data after the handshake has completed.
7592 inbound_handshake_limits_override: None,
7595 temporary_channel_id,
7597 announcement_sigs_state: announcement_sigs_state.unwrap(),
7599 channel_value_satoshis,
7601 latest_monitor_update_id,
7603 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7604 shutdown_scriptpubkey,
7607 cur_holder_commitment_transaction_number,
7608 cur_counterparty_commitment_transaction_number,
7611 holder_max_accepted_htlcs,
7612 pending_inbound_htlcs,
7613 pending_outbound_htlcs,
7614 holding_cell_htlc_updates,
7618 monitor_pending_channel_ready,
7619 monitor_pending_revoke_and_ack,
7620 monitor_pending_commitment_signed,
7621 monitor_pending_forwards,
7622 monitor_pending_failures,
7623 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7625 signer_pending_commitment_update: false,
7628 holding_cell_update_fee,
7629 next_holder_htlc_id,
7630 next_counterparty_htlc_id,
7631 update_time_counter,
// Debug-only stat trackers are not serialized; reset them on load.
7634 #[cfg(debug_assertions)]
7635 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7636 #[cfg(debug_assertions)]
7637 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
// Closing-negotiation state is deliberately dropped across restarts (see the comment on
// the legacy last_sent_closing_fee read above).
7639 last_sent_closing_fee: None,
7640 pending_counterparty_closing_signed: None,
7641 closing_fee_limits: None,
7642 target_closing_feerate_sats_per_kw,
7644 funding_tx_confirmed_in,
7645 funding_tx_confirmation_height,
7647 channel_creation_height: channel_creation_height.unwrap(),
7649 counterparty_dust_limit_satoshis,
7650 holder_dust_limit_satoshis,
7651 counterparty_max_htlc_value_in_flight_msat,
7652 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7653 counterparty_selected_channel_reserve_satoshis,
7654 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7655 counterparty_htlc_minimum_msat,
7656 holder_htlc_minimum_msat,
7657 counterparty_max_accepted_htlcs,
7660 counterparty_forwarding_info,
7662 channel_transaction_parameters: channel_parameters,
7663 funding_transaction,
7666 counterparty_cur_commitment_point,
7667 counterparty_prev_commitment_point,
7668 counterparty_node_id,
7670 counterparty_shutdown_scriptpubkey,
7674 channel_update_status,
7675 closing_signed_in_flight: false,
// Fee-estimation caches are not serialized; start empty (test/fuzz builds only).
7679 #[cfg(any(test, fuzzing))]
7680 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7681 #[cfg(any(test, fuzzing))]
7682 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7684 workaround_lnd_bug_4006: None,
7685 sent_message_awaiting_response: None,
7687 latest_inbound_scid_alias,
7688 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
7689 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
// For channels written before these events existed, assume they were already emitted so
// we don't duplicate them on reload.
7691 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7692 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7694 #[cfg(any(test, fuzzing))]
7695 historical_inbound_htlc_fulfills,
7697 channel_type: channel_type.unwrap(),
7700 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7709 use bitcoin::blockdata::constants::ChainHash;
7710 use bitcoin::blockdata::script::{Script, Builder};
7711 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7712 use bitcoin::blockdata::opcodes;
7713 use bitcoin::network::constants::Network;
7715 use crate::ln::PaymentHash;
7716 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7717 use crate::ln::channel::InitFeatures;
7718 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7719 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7720 use crate::ln::features::ChannelTypeFeatures;
7721 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7722 use crate::ln::script::ShutdownScript;
7723 use crate::ln::chan_utils;
7724 use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7725 use crate::chain::BestBlock;
7726 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7727 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7728 use crate::chain::transaction::OutPoint;
7729 use crate::routing::router::Path;
7730 use crate::util::config::UserConfig;
7731 use crate::util::errors::APIError;
7732 use crate::util::test_utils;
7733 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7734 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7735 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7736 use bitcoin::secp256k1::{SecretKey,PublicKey};
7737 use bitcoin::hashes::sha256::Hash as Sha256;
7738 use bitcoin::hashes::Hash;
7739 use bitcoin::hash_types::WPubkeyHash;
7740 use bitcoin::PackedLockTime;
7741 use bitcoin::util::address::WitnessVersion;
7742 use crate::prelude::*;
// Minimal test-only fee estimator. Callers construct it as `TestFeeEstimator { fee_est: N }`
// (see the tests below); presumably `get_est_sat_per_1000_weight` returns that fixed value
// regardless of the confirmation target — body elided here, TODO confirm.
7744 struct TestFeeEstimator {
7747 impl FeeEstimator for TestFeeEstimator {
7748 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
// Sanity-checks the funding-amount constants: total supply is 21M BTC in sats, and the
// non-wumbo funding cap must not exceed it.
7754 fn test_max_funding_satoshis_no_wumbo() {
7755 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7756 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7757 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
// Regression test: `check_remote_fee` must reject (not panic on) a u32::MAX feerate.
7761 fn test_no_fee_check_overflow() {
7762 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
7763 // arithmetic, causing a panic with debug assertions enabled.
7764 let fee_est = TestFeeEstimator { fee_est: 42 };
7765 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
// The maximum feerate must come back as an Err, not overflow internally.
7766 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
7767 &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
7768 u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
// Field of the test `Keys` provider (struct header elided above): the fixed signer every
// trait method below hands out.
7772 signer: InMemorySigner,
// Deterministic "entropy" for tests: always all-zero bytes.
7775 impl EntropySource for Keys {
7776 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
// SignerProvider for the test `Keys`: always serves the single stored `InMemorySigner`,
// and derives destination/shutdown scripts from a fixed hard-coded secret key.
7779 impl SignerProvider for Keys {
7780 type Signer = InMemorySigner;
// Ignores all inputs; the keys id is whatever the stored signer reports.
7782 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7783 self.signer.channel_keys_id()
7786 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
// Deserializing a signer is unsupported in these tests; reaching this is a test bug.
7790 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
// Builds a v0 witness-program (P2WPKH-shaped) destination script from a fixed key.
7792 fn get_destination_script(&self) -> Result<Script, ()> {
7793 let secp_ctx = Secp256k1::signing_only();
7794 let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7795 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7796 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
// P2WPKH shutdown script derived from the same fixed key.
7799 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7800 let secp_ctx = Secp256k1::signing_only();
7801 let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7802 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
// Test-vector helper: parse a hex-encoded secret key and return its public key.
// Only compiled for the `_test_vectors` feature (and without signature grinding).
#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7807 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7808 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
// If our keys interface hands out a non-v0-segwit upfront shutdown script (here v16) while
// the peer does not support `option_shutdown_anysegwit`, channel creation must fail with
// `APIError::IncompatibleShutdownScript` carrying that script.
7812 fn upfront_shutdown_script_incompatibility() {
// Peer features with shutdown_anysegwit explicitly removed.
7813 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7814 let non_v0_segwit_shutdown_script =
7815 ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7817 let seed = [42; 32];
7818 let network = Network::Testnet;
7819 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Queue the incompatible script to be returned on the next get_shutdown_scriptpubkey call.
7820 keys_provider.expect(OnGetShutdownScriptpubkey {
7821 returns: non_v0_segwit_shutdown_script.clone(),
7824 let secp_ctx = Secp256k1::new();
7825 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7826 let config = UserConfig::default();
7827 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7828 Err(APIError::IncompatibleShutdownScript { script }) => {
7829 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7831 Err(e) => panic!("Unexpected error: {:?}", e),
7832 Ok(_) => panic!("Expected error"),
// Check that, during channel creation, we use the same feerate in the open channel message
// as we do in the Channel object creation itself.
7839 fn test_open_channel_msg_fee() {
7840 let original_fee = 253;
7841 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7842 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7843 let secp_ctx = Secp256k1::new();
7844 let seed = [42; 32];
7845 let network = Network::Testnet;
7846 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7848 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7849 let config = UserConfig::default();
// Channel is created while the estimator still reports `original_fee`.
7850 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7852 // Now change the fee so we can check that the fee in the open_channel message is the
7853 // same as the old fee.
7854 fee_est.fee_est = 500;
7855 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7856 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
// Commitment-fee calculation must apply each side's own dust limit: an HTLC amount chosen
// between A's (raised) limit and B's (lowered) limit should be dust on A's local commitment
// (contributing no HTLC output fee) but non-dust on the remote commitment.
7860 fn test_holder_vs_counterparty_dust_limit() {
7861 // Test that when calculating the local and remote commitment transaction fees, the correct
7862 // dust limits are used.
7863 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7864 let secp_ctx = Secp256k1::new();
7865 let seed = [42; 32];
7866 let network = Network::Testnet;
7867 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7868 let logger = test_utils::TestLogger::new();
7869 let best_block = BestBlock::from_network(network);
7871 // Go through the flow of opening a channel between two nodes, making sure
7872 // they have different dust limits.
7874 // Create Node A's channel pointing to Node B's pubkey
7875 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7876 let config = UserConfig::default();
7877 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7879 // Create Node B's channel by receiving Node A's open_channel message
7880 // Make sure A's dust limit is as we expect.
7881 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7882 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7883 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
7885 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
7886 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
7887 accept_channel_msg.dust_limit_satoshis = 546;
7888 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Force A's dust limit well above B's 546 sats so the two sides diverge.
7889 node_a_chan.context.holder_dust_limit_satoshis = 1560;
7891 // Node A --> Node B: funding created
7892 let output_script = node_a_chan.context.get_funding_redeemscript();
7893 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
7894 value: 10000000, script_pubkey: output_script.clone(),
7896 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
7897 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
7898 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
7900 // Node B --> Node A: funding signed
7901 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
7903 // Put some inbound and outbound HTLCs in A's channel.
7904 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
7905 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
7907 amount_msat: htlc_amount_msat,
7908 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
7909 cltv_expiry: 300000000,
7910 state: InboundHTLCState::Committed,
7913 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
7915 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
7916 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
7917 cltv_expiry: 200000000,
7918 state: OutboundHTLCState::Committed,
7919 source: HTLCSource::OutboundRoute {
7920 path: Path { hops: Vec::new(), blinded_tail: None },
7921 session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7922 first_hop_htlc_msat: 548,
7923 payment_id: PaymentId([42; 32]),
7925 skimmed_fee_msat: None,
7928 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
7929 // the dust limit check.
7930 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7931 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
// All dust => the fee matches a 0-HTLC commitment.
7932 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
7933 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
7935 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
7936 // of the HTLCs are seen to be above the dust limit.
7937 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
// All three HTLCs (two pushed above plus the candidate) are non-dust on the remote tx.
7938 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
7939 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7940 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7941 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
// Test: the dust-threshold arithmetic in `next_local_commit_tx_fee_msat` /
// `next_remote_commit_tx_fee_msat` must pair each HTLC direction with the correct
// second-stage transaction weight (offered -> timeout tx, received -> success tx).
// Each check below places an HTLC exactly 1 sat on one side of a threshold and
// asserts whether it is counted toward the commitment-tx fee (non-dust) or not (dust).
7945 fn test_timeout_vs_success_htlc_dust_limit() {
7946 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
7947 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
7948 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
7949 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
// NOTE: the feerate of 253 sat/kW set here must match the hard-coded `253` used in the
// threshold computations below.
7950 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
7951 let secp_ctx = Secp256k1::new();
7952 let seed = [42; 32];
7953 let network = Network::Testnet;
7954 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7956 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7957 let config = UserConfig::default();
7958 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Baseline commitment-tx fees for zero and one non-dust HTLC, used as expected values below.
7960 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
7961 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
7963 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
7964 // counted as dust when it shouldn't be.
// Locally-offered HTLC 1 sat above the timeout-tx threshold: expected non-dust (fee for 1 HTLC).
7965 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
7966 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7967 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7968 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
7970 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
// Remotely-offered HTLC 1 sat below the success-tx threshold: expected dust (fee for 0 HTLCs).
7971 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
7972 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7973 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
7974 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Flip the channel to be treated as inbound so the remote-commitment checks below use the
// counterparty's dust limit.
7976 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
7978 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
// On the remote commitment a locally-offered HTLC is claimed via a *success* tx by the
// counterparty, so being above the timeout threshold should still leave it dust here.
7979 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
7980 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7981 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7982 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7984 // If swapped: this HTLC would be counted as dust when it shouldn't be.
7985 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
7986 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7987 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7988 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// Test: after opening and funding a channel between A and B with no commitment
// updates exchanged, disconnecting both sides must produce sane `channel_reestablish`
// messages — commitment/revocation counters at their initial values and a zeroed
// last-per-commitment-secret (nothing has been revoked yet).
7992 fn channel_reestablish_no_updates() {
7993 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7994 let logger = test_utils::TestLogger::new();
7995 let secp_ctx = Secp256k1::new();
7996 let seed = [42; 32];
7997 let network = Network::Testnet;
7998 let best_block = BestBlock::from_network(network);
7999 let chain_hash = ChainHash::using_genesis_block(network);
8000 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8002 // Go through the flow of opening a channel between two nodes.
8004 // Create Node A's channel pointing to Node B's pubkey
8005 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8006 let config = UserConfig::default();
8007 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8009 // Create Node B's channel by receiving Node A's open_channel message
8010 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8011 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8012 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8014 // Node B --> Node A: accept channel
8015 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8016 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8018 // Node A --> Node B: funding created
// Build a dummy funding transaction paying the channel's funding redeemscript.
8019 let output_script = node_a_chan.context.get_funding_redeemscript();
8020 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8021 value: 10000000, script_pubkey: output_script.clone(),
8023 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8024 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8025 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8027 // Node B --> Node A: funding signed
8028 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8030 // Now disconnect the two nodes and check that the commitment point in
8031 // Node B's channel_reestablish message is sane.
8032 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8033 let msg = node_b_chan.get_channel_reestablish(&&logger);
8034 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8035 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
// All-zero secret: no previous commitment has been revoked at this point.
8036 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8038 // Check that the commitment point in Node A's channel_reestablish message
// (A is symmetric to B here — same expected initial counters and zeroed secret.)
8040 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8041 let msg = node_a_chan.get_channel_reestablish(&&logger);
8042 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8043 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8044 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Test: `holder_max_htlc_value_in_flight_msat` honors the user-configured
// `max_inbound_htlc_value_in_flight_percent_of_channel` for both outbound and
// inbound channel creation, and clamps out-of-range configurations to the
// valid bounds (values below 1 -> 1%, values above 100 -> 100%).
8048 fn test_configured_holder_max_htlc_value_in_flight() {
8049 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8050 let logger = test_utils::TestLogger::new();
8051 let secp_ctx = Secp256k1::new();
8052 let seed = [42; 32];
8053 let network = Network::Testnet;
8054 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8055 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8056 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Four configs: two in-range (2%, 99%) and two out-of-range (0%, 101%) to exercise clamping.
8058 let mut config_2_percent = UserConfig::default();
8059 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8060 let mut config_99_percent = UserConfig::default();
8061 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8062 let mut config_0_percent = UserConfig::default();
8063 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8064 let mut config_101_percent = UserConfig::default();
8065 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8067 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8068 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8069 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8070 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
8071 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8072 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8074 // Test with the upper bound - 1 of valid values (99%).
8075 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
8076 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8077 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Reuse chan_1's open_channel message for all the inbound-channel cases below.
8079 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8081 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8082 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8083 // which is set to the lower bound - 1 (2%) of the `channel_value`.
8084 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8085 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8086 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8088 // Test with the upper bound - 1 of valid values (99%).
8089 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8090 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8091 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8093 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8094 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8095 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
8096 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8097 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8099 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8100 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8102 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
8103 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8104 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8106 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8107 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8108 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8109 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8110 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8112 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8113 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8115 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8116 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8117 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
// Test driver: exercises `test_self_and_counterparty_channel_reserve` (below) across
// reasonable, extreme-but-valid, below-minimum, and invalid reserve configurations.
8121 fn test_configured_holder_selected_channel_reserve_satoshis() {
8123 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8124 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8125 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8127 // Test with valid but unreasonably high channel reserves
8128 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8129 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8130 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8132 // Test with calculated channel reserve less than lower bound
8133 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8134 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8136 // Test with invalid channel reserves since sum of both is greater than or equal
// to the whole channel value — the helper expects negotiation to fail for these.
8138 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8139 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Helper for the reserve tests above.
//
// Parameters:
// - `channel_value_satoshis`: total channel value used for both sides.
// - `outbound_selected_channel_reserve_perc`: reserve fraction the opener demands of its peer.
// - `inbound_selected_channel_reserve_perc`: reserve fraction the acceptor demands of its peer.
//
// When the two fractions sum to less than 1.0, channel acceptance must succeed and
// each side's holder-selected reserve must equal
// max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, value * fraction); otherwise
// `InboundV1Channel::new` must return an error (negotiation failure).
8142 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8143 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8144 let logger = test_utils::TestLogger::new();
8145 let secp_ctx = Secp256k1::new();
8146 let seed = [42; 32];
8147 let network = Network::Testnet;
8148 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8149 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8150 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Opener side: configure its requested counterparty reserve in parts-per-million.
8153 let mut outbound_node_config = UserConfig::default();
8154 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8155 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
// The computed reserve is floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS.
8157 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8158 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8160 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8161 let mut inbound_node_config = UserConfig::default();
8162 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8164 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
// Reserves fit within the channel value: acceptance must succeed and both
// holder- and counterparty-selected reserves must match expectations.
8165 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8167 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8169 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8170 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8172 // Channel Negotiations failed
8173 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8174 assert!(result.is_err());
// Test: applying a counterparty `channel_update` to a funded channel stores the
// counterparty's forwarding parameters (cltv_expiry_delta / fees) without touching
// our own `holder_htlc_minimum_msat`; re-applying the identical update returns
// `false` (no change).
8179 fn channel_update() {
8180 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8181 let logger = test_utils::TestLogger::new();
8182 let secp_ctx = Secp256k1::new();
8183 let seed = [42; 32];
8184 let network = Network::Testnet;
8185 let best_block = BestBlock::from_network(network);
8186 let chain_hash = ChainHash::using_genesis_block(network);
8187 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8189 // Create Node A's channel pointing to Node B's pubkey
8190 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8191 let config = UserConfig::default();
8192 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8194 // Create Node B's channel by receiving Node A's open_channel message
8195 // Make sure A's dust limit is as we expect.
8196 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8197 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8198 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8200 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8201 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8202 accept_channel_msg.dust_limit_satoshis = 546;
8203 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8204 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8206 // Node A --> Node B: funding created
// Dummy funding transaction paying the channel's funding redeemscript.
8207 let output_script = node_a_chan.context.get_funding_redeemscript();
8208 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8209 value: 10000000, script_pubkey: output_script.clone(),
8211 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8212 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8213 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8215 // Node B --> Node A: funding signed
8216 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
8218 // Make sure that receiving a channel update will update the Channel as expected.
8219 let update = ChannelUpdate {
8220 contents: UnsignedChannelUpdate {
8222 short_channel_id: 0,
8225 cltv_expiry_delta: 100,
8226 htlc_minimum_msat: 5,
8227 htlc_maximum_msat: MAX_VALUE_MSAT,
8229 fee_proportional_millionths: 11,
8230 excess_data: Vec::new(),
// Signature content is irrelevant here — channel_update() does not verify it in this path.
// NOTE(review): FFISignature::new() produces an uninitialized/dummy sig — confirm against
// the test-utils definition.
8232 signature: Signature::from(unsafe { FFISignature::new() })
// First application of the update: returns true (state changed).
8234 assert!(node_a_chan.channel_update(&update).unwrap());
8236 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8237 // change our official htlc_minimum_msat.
8238 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8239 match node_a_chan.context.counterparty_forwarding_info() {
// Stored forwarding info must mirror the values carried in the update above.
8241 assert_eq!(info.cltv_expiry_delta, 100);
8242 assert_eq!(info.fee_base_msat, 110);
8243 assert_eq!(info.fee_proportional_millionths, 11);
8245 None => panic!("expected counterparty forwarding info to be Some")
// Applying the identical update again must report no change.
8248 assert!(!node_a_chan.channel_update(&update).unwrap());
8251 #[cfg(feature = "_test_vectors")]
8253 fn outbound_commitment_test() {
8254 use bitcoin::util::sighash;
8255 use bitcoin::consensus::encode::serialize;
8256 use bitcoin::blockdata::transaction::EcdsaSighashType;
8257 use bitcoin::hashes::hex::FromHex;
8258 use bitcoin::hash_types::Txid;
8259 use bitcoin::secp256k1::Message;
8260 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
8261 use crate::ln::PaymentPreimage;
8262 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8263 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8264 use crate::util::logger::Logger;
8265 use crate::sync::Arc;
8267 // Test vectors from BOLT 3 Appendices C and F (anchors):
8268 let feeest = TestFeeEstimator{fee_est: 15000};
8269 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8270 let secp_ctx = Secp256k1::new();
8272 let mut signer = InMemorySigner::new(
8274 SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8275 SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8276 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8277 SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8278 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8280 // These aren't set in the test vectors:
8281 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8287 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8288 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8289 let keys_provider = Keys { signer: signer.clone() };
8291 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8292 let mut config = UserConfig::default();
8293 config.channel_handshake_config.announced_channel = false;
8294 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8295 chan.context.holder_dust_limit_satoshis = 546;
8296 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8298 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8300 let counterparty_pubkeys = ChannelPublicKeys {
8301 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8302 revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8303 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8304 delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8305 htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8307 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8308 CounterpartyChannelTransactionParameters {
8309 pubkeys: counterparty_pubkeys.clone(),
8310 selected_contest_delay: 144
8312 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8313 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8315 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8316 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8318 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8319 hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8321 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8322 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8324 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8325 // derived from a commitment_seed, so instead we copy it here and call
8326 // build_commitment_transaction.
8327 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8328 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8329 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8330 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8331 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8333 macro_rules! test_commitment {
8334 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8335 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8336 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8340 macro_rules! test_commitment_with_anchors {
8341 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8342 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8343 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8347 macro_rules! test_commitment_common {
8348 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8349 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8351 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8352 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8354 let htlcs = commitment_stats.htlcs_included.drain(..)
8355 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8357 (commitment_stats.tx, htlcs)
8359 let trusted_tx = commitment_tx.trust();
8360 let unsigned_tx = trusted_tx.built_transaction();
8361 let redeemscript = chan.context.get_funding_redeemscript();
8362 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8363 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8364 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8365 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8367 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8368 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8369 let mut counterparty_htlc_sigs = Vec::new();
8370 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8372 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8373 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8374 counterparty_htlc_sigs.push(remote_signature);
8376 assert_eq!(htlcs.len(), per_htlc.len());
8378 let holder_commitment_tx = HolderCommitmentTransaction::new(
8379 commitment_tx.clone(),
8380 counterparty_signature,
8381 counterparty_htlc_sigs,
8382 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8383 chan.context.counterparty_funding_pubkey()
8385 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
// NOTE(review): this excerpt elides some original lines (the inline numbering
// jumps, e.g. 8386 -> 8388, 8406 -> 8408); loop headers and closing braces for
// some of the constructs below live in the elided lines — confirm against the
// full file before editing.
//
// Check the locally-produced commitment signature against the expected test
// vector, then attach it and compare the fully-signed commitment transaction
// byte-for-byte against the BOLT 3 vector.
8386 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8388 let funding_redeemscript = chan.context.get_funding_redeemscript();
8389 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8390 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8392 // ((htlc, counterparty_sig), (index, holder_sig))
8393 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
// Per-HTLC verification (expanded once per $htlc_idx by the macro): rebuild the
// HTLC-success/timeout transaction and check the counterparty's signature over it.
8396 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8397 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8399 let ref htlc = htlcs[$htlc_idx];
8400 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8401 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8402 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8403 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
// Anchor-output channels sign HTLC txs with SINGLE|ANYONECANPAY so fees can be
// bumped by adding inputs/outputs; legacy channels use SIGHASH_ALL.
8404 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8405 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8406 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
// Recover the payment preimage by brute-matching candidate preimages [i; 32]
// against this HTLC's payment hash (the `for i in ...` loop header is in the
// elided lines above — TODO confirm the iteration range in the full file).
8408 let mut preimage: Option<PaymentPreimage> = None;
8411 let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8412 if out == htlc.payment_hash {
8413 preimage = Some(PaymentPreimage([i; 32]));
8417 assert!(preimage.is_some());
// Produce our own signature for the HTLC transaction via the signer API and
// check it against the expected per-HTLC test vector. The HTLCDescriptor
// literal below is missing some fields/closers due to elided lines.
8420 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8421 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8422 channel_derivation_parameters: ChannelDerivationParameters {
8423 value_satoshis: chan.context.channel_value_satoshis,
8424 keys_id: chan.context.channel_keys_id,
8425 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8427 commitment_txid: trusted_tx.txid(),
8428 per_commitment_number: trusted_tx.commitment_number(),
8429 per_commitment_point: trusted_tx.per_commitment_point(),
8430 feerate_per_kw: trusted_tx.feerate_per_kw(),
8432 preimage: preimage.clone(),
8433 counterparty_sig: *htlc_counterparty_sig,
8434 }, &secp_ctx).unwrap();
// On anchor channels the two anchor outputs occupy the first output indices,
// shifting every HTLC output index up by 2.
8435 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8436 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8438 let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8439 assert_eq!(signature, htlc_holder_sig, "htlc sig");
// Assemble the witness from both signatures (plus the preimage for success
// paths) and compare the fully-signed HTLC tx against the BOLT 3 vector.
8440 let trusted_tx = holder_commitment_tx.trust();
8441 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8442 log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8443 assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
// Every counterparty HTLC signature must have been consumed — i.e. the test
// vector listed exactly as many HTLCs as the commitment carries.
8445 assert!(htlc_counterparty_sig_iter.next().is_none());
8449 // anchors: simple commitment tx with no HTLCs and single anchor
8450 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8451 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8452 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8454 // simple commitment tx with no HTLCs
8455 chan.context.value_to_self_msat = 7000000000;
8457 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8458 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8459 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8461 // anchors: simple commitment tx with no HTLCs
8462 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8463 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8464 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// Install the five HTLCs used by the BOLT 3 Appendix C vectors: three inbound
// (1_000_000, 2_000_000, 4_000_000 msat) and two outbound (2_000_000,
// 3_000_000 msat), each with payment preimage [i; 32] for i = 0..4.
// NOTE(review): the struct literals below are missing fields (e.g. htlc_id,
// cltv_expiry) and their closing braces due to elided lines in this excerpt —
// see the gaps in the inline numbering; consult the full file.
8466 chan.context.pending_inbound_htlcs.push({
8467 let mut out = InboundHTLCOutput{
8469 amount_msat: 1000000,
8471 payment_hash: PaymentHash([0; 32]),
8472 state: InboundHTLCState::Committed,
// Payment hash = SHA256(preimage) with preimage [0; 32].
8474 out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8477 chan.context.pending_inbound_htlcs.push({
8478 let mut out = InboundHTLCOutput{
8480 amount_msat: 2000000,
8482 payment_hash: PaymentHash([0; 32]),
8483 state: InboundHTLCState::Committed,
// Payment hash = SHA256(preimage) with preimage [1; 32].
8485 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8488 chan.context.pending_outbound_htlcs.push({
8489 let mut out = OutboundHTLCOutput{
8491 amount_msat: 2000000,
8493 payment_hash: PaymentHash([0; 32]),
8494 state: OutboundHTLCState::Committed,
8495 source: HTLCSource::dummy(),
8496 skimmed_fee_msat: None,
// Payment hash = SHA256(preimage) with preimage [2; 32].
8498 out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8501 chan.context.pending_outbound_htlcs.push({
8502 let mut out = OutboundHTLCOutput{
8504 amount_msat: 3000000,
8506 payment_hash: PaymentHash([0; 32]),
8507 state: OutboundHTLCState::Committed,
8508 source: HTLCSource::dummy(),
8509 skimmed_fee_msat: None,
// Payment hash = SHA256(preimage) with preimage [3; 32].
8511 out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8514 chan.context.pending_inbound_htlcs.push({
8515 let mut out = InboundHTLCOutput{
8517 amount_msat: 4000000,
8519 payment_hash: PaymentHash([0; 32]),
8520 state: InboundHTLCState::Committed,
// Payment hash = SHA256(preimage) with preimage [4; 32].
8522 out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8526 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8527 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8528 chan.context.feerate_per_kw = 0;
8530 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8531 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8532 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8535 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8536 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8537 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8540 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8541 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8542 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8545 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8546 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8547 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8550 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8551 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8552 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8555 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8556 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8557 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8560 // commitment tx with seven outputs untrimmed (maximum feerate)
8561 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8562 chan.context.feerate_per_kw = 647;
8564 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8565 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8566 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8569 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8570 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8571 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8574 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8575 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8576 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8579 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8580 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8581 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8584 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8585 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8586 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8589 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8590 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8591 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8594 // commitment tx with six outputs untrimmed (minimum feerate)
8595 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8596 chan.context.feerate_per_kw = 648;
8598 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8599 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8600 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8603 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8604 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8605 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8608 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8609 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8610 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8613 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8614 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8615 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8618 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8619 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8620 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8623 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8624 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8625 chan.context.feerate_per_kw = 645;
8626 chan.context.holder_dust_limit_satoshis = 1001;
8628 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8629 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8630 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8633 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8634 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8635 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8638 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8639 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8640 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8643 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8644 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8645 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8648 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8649 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8650 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8653 // commitment tx with six outputs untrimmed (maximum feerate)
8654 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8655 chan.context.feerate_per_kw = 2069;
8656 chan.context.holder_dust_limit_satoshis = 546;
8658 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8659 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8660 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8663 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8664 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8665 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8668 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8669 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8670 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8673 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8674 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8675 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8678 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8679 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8680 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8683 // commitment tx with five outputs untrimmed (minimum feerate)
8684 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8685 chan.context.feerate_per_kw = 2070;
8687 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8688 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8689 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8692 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8693 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8694 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8697 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8698 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8699 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8702 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8703 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8704 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8707 // commitment tx with five outputs untrimmed (maximum feerate)
8708 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8709 chan.context.feerate_per_kw = 2194;
8711 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8712 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8713 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8716 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8717 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8718 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8721 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8722 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8723 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8726 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8727 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8728 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8731 // commitment tx with four outputs untrimmed (minimum feerate)
8732 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8733 chan.context.feerate_per_kw = 2195;
8735 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8736 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8737 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8740 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8741 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8742 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8745 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8746 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8747 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8750 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8751 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8752 chan.context.feerate_per_kw = 2185;
8753 chan.context.holder_dust_limit_satoshis = 2001;
8754 let cached_channel_type = chan.context.channel_type;
8755 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8757 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8758 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8759 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8762 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8763 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8764 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8767 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8768 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8769 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8772 // commitment tx with four outputs untrimmed (maximum feerate)
8773 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8774 chan.context.feerate_per_kw = 3702;
8775 chan.context.holder_dust_limit_satoshis = 546;
8776 chan.context.channel_type = cached_channel_type.clone();
8778 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8779 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8780 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8783 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8784 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8785 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8788 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8789 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8790 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8793 // commitment tx with three outputs untrimmed (minimum feerate)
8794 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8795 chan.context.feerate_per_kw = 3703;
8797 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8798 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8799 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8802 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8803 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8804 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8807 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8808 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8809 chan.context.feerate_per_kw = 3687;
8810 chan.context.holder_dust_limit_satoshis = 3001;
8811 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8813 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8814 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8815 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8818 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8819 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8820 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8823 // commitment tx with three outputs untrimmed (maximum feerate)
8824 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8825 chan.context.feerate_per_kw = 4914;
8826 chan.context.holder_dust_limit_satoshis = 546;
8827 chan.context.channel_type = cached_channel_type.clone();
8829 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8830 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8831 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8834 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8835 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8836 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8839 // commitment tx with two outputs untrimmed (minimum feerate)
8840 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8841 chan.context.feerate_per_kw = 4915;
8842 chan.context.holder_dust_limit_satoshis = 546;
8844 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8845 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8846 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8848 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8849 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8850 chan.context.feerate_per_kw = 4894;
8851 chan.context.holder_dust_limit_satoshis = 4001;
8852 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8854 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8855 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8856 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8858 // commitment tx with two outputs untrimmed (maximum feerate)
8859 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8860 chan.context.feerate_per_kw = 9651180;
8861 chan.context.holder_dust_limit_satoshis = 546;
8862 chan.context.channel_type = cached_channel_type.clone();
8864 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8865 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8866 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8868 // commitment tx with one output untrimmed (minimum feerate)
8869 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8870 chan.context.feerate_per_kw = 9651181;
8872 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8873 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8874 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8876 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8877 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8878 chan.context.feerate_per_kw = 6216010;
8879 chan.context.holder_dust_limit_satoshis = 4001;
8880 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8882 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8883 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8884 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8886 // commitment tx with fee greater than funder amount
8887 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8888 chan.context.feerate_per_kw = 9651936;
8889 chan.context.holder_dust_limit_satoshis = 546;
8890 chan.context.channel_type = cached_channel_type;
8892 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8893 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8894 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8896 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8897 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
8898 chan.context.feerate_per_kw = 253;
8899 chan.context.pending_inbound_htlcs.clear();
8900 chan.context.pending_inbound_htlcs.push({
8901 let mut out = InboundHTLCOutput{
8903 amount_msat: 2000000,
8905 payment_hash: PaymentHash([0; 32]),
8906 state: InboundHTLCState::Committed,
8908 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8911 chan.context.pending_outbound_htlcs.clear();
8912 chan.context.pending_outbound_htlcs.push({
8913 let mut out = OutboundHTLCOutput{
8915 amount_msat: 5000001,
8917 payment_hash: PaymentHash([0; 32]),
8918 state: OutboundHTLCState::Committed,
8919 source: HTLCSource::dummy(),
8920 skimmed_fee_msat: None,
8922 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8925 chan.context.pending_outbound_htlcs.push({
8926 let mut out = OutboundHTLCOutput{
8928 amount_msat: 5000000,
8930 payment_hash: PaymentHash([0; 32]),
8931 state: OutboundHTLCState::Committed,
8932 source: HTLCSource::dummy(),
8933 skimmed_fee_msat: None,
8935 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8939 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8940 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8941 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8944 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8945 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8946 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8948 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8949 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8950 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8952 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8953 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8954 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8957 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8958 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8959 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8960 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8963 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8964 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8965 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8967 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8968 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8969 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8971 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8972 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8973 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8978 fn test_per_commitment_secret_gen() {
8979 // Test vectors from BOLT 3 Appendix D:
// Each case feeds a 32-byte seed and a 48-bit commitment index into
// build_commitment_secret and checks the derived secret against the
// published vector.
8981 let mut seed = [0; 32];
// Case: "generate_from_seed 0 final node" — all-zeroes seed, I = 2^48 - 1.
8982 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8983 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8984 hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// Case: "generate_from_seed FF final node" — all-ones seed, same index.
8986 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8987 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8988 hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Alternating-bit indices; note the all-ones seed from above is still in
// effect for these two cases.
8990 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8991 hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
8993 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8994 hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Case: "generate_from_seed 0101...01 seed", index 1.
8996 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8997 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8998 hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9002 fn test_key_derivation() {
9003 // Test vectors from BOLT 3 Appendix E:
// Checks localpubkey / privkey and revocationpubkey / revocationprivkey
// derivation from (base point, per-commitment point) pairs against the
// spec's published vectors.
9004 let secp_ctx = Secp256k1::new();
// Fixed base secret and per-commitment secret from the spec.
9006 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9007 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// Sanity-check the corresponding public points match the spec first, so a
// failure below clearly points at derivation rather than the inputs.
9009 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9010 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9012 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9013 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// localpubkey = basepoint + SHA256(per_commitment_point || basepoint) * G
9015 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
9016 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
// Matching private-key derivation for the same point.
9018 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9019 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// revocationpubkey derivation (blinded combination of both points).
9021 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
9022 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// revocationprivkey — requires knowledge of both secrets.
9024 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9025 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9029 fn test_zero_conf_channel_type_support() {
// Verifies that an inbound channel open whose explicit channel_type sets the
// zero-conf bit is accepted by InboundV1Channel::new even when the local
// `is_0conf` flag passed in is false (acceptance is asserted via is_ok()).
9030 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9031 let secp_ctx = Secp256k1::new();
9032 let seed = [42; 32];
9033 let network = Network::Testnet;
9034 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9035 let logger = test_utils::TestLogger::new();
// Build an ordinary outbound channel (node A) so we can grab a template
// open_channel message from it below.
9037 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9038 let config = UserConfig::default();
9039 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9040 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// static_remote_key plus the zero-conf-required bit.
9042 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9043 channel_type_features.set_zero_conf_required();
// Override the message's channel_type with the zero-conf variant and hand it
// to the inbound (node B) constructor.
9045 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9046 open_channel_msg.channel_type = Some(channel_type_features);
9047 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9048 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9049 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9050 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9051 assert!(res.is_ok());
9055 fn test_supports_anchors_zero_htlc_tx_fee() {
9056 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9057 // resulting `channel_type`.
9058 let secp_ctx = Secp256k1::new();
9059 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9060 let network = Network::Testnet;
9061 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9062 let logger = test_utils::TestLogger::new();
9064 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9065 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Config with the anchors-zero-fee-htlc-tx handshake knob enabled; used for
// the second (successful) negotiation below.
9067 let mut config = UserConfig::default();
9068 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9070 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9071 // need to signal it.
// Counterparty features come from a default config here, so the negotiated
// type must NOT include anchors.
9072 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9073 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9074 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9077 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
// Expected outcome when both sides opt in: static_remote_key + anchors.
9079 let mut expected_channel_type = ChannelTypeFeatures::empty();
9080 expected_channel_type.set_static_remote_key_required();
9081 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
// Redo the open with both sides advertising the feature (via `config`).
9083 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9084 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9085 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9088 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9089 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9090 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9091 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9092 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Both ends must have settled on the anchors channel type.
9095 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9096 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9100 fn test_rejects_implicit_simple_anchors() {
9101 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9102 // each side's `InitFeatures`, it is rejected.
9103 let secp_ctx = Secp256k1::new();
9104 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9105 let network = Network::Testnet;
9106 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9107 let logger = test_utils::TestLogger::new();
9109 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9110 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9112 let config = UserConfig::default();
9114 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-roll an InitFeatures advertising the legacy (non-zero-fee-HTLC)
// `option_anchors` bit alongside `option_static_remotekey`.
9115 let static_remote_key_required: u64 = 1 << 12;
9116 let simple_anchors_required: u64 = 1 << 20;
9117 let raw_init_features = static_remote_key_required | simple_anchors_required;
9118 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9120 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9121 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9122 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
9125 // Set `channel_type` to `None` to force the implicit feature negotiation.
9126 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9127 open_channel_msg.channel_type = None;
9129 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9130 // `static_remote_key`, it will fail the channel.
9131 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9132 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9133 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9134 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9136 assert!(channel_b.is_err());
9140 fn test_rejects_simple_anchors_channel_type() {
9141 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
// it is rejected: the legacy non-zero-fee-HTLC anchors type is not supported, only
// `anchors_zero_fee_htlc_tx`. Two scenarios are exercised below: (1) the inbound side
// rejects an open_channel requesting it, and (2) the outbound side rejects a malicious
// accept_channel that tries to downgrade to it.
9143 let secp_ctx = Secp256k1::new();
9144 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9145 let network = Network::Testnet;
9146 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9147 let logger = test_utils::TestLogger::new();
// Deterministic node ids for the two endpoints of the handshake.
9149 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9150 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9152 let config = UserConfig::default();
9154 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9155 let static_remote_key_required: u64 = 1 << 12;
9156 let simple_anchors_required: u64 = 1 << 20;
// Build feature sets that require the legacy `option_anchors` ("simple anchors") bit
// alongside `static_remote_key`, both as init features and as an explicit channel type.
9157 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9158 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9159 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
// Sanity check: both bits are known to the implementation, so any rejection asserted
// below is due to the channel type itself, not unknown-feature handling.
9160 assert!(!simple_anchors_init.requires_unknown_bits());
9161 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9163 // First, we'll try to open a channel between A and B where A requests a channel type for
9164 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9165 // B as it's not supported by LDK.
9166 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9167 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9168 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
// Force the explicit channel_type negotiation path by setting it to the legacy
// anchors type in the outgoing open_channel message.
9171 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9172 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
// The inbound side (B) must refuse to accept the legacy anchors channel type.
9174 let res = InboundV1Channel::<&TestKeysInterface>::new(
9175 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9176 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9177 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9179 assert!(res.is_err());
9181 // Then, we'll try to open another channel where A requests a channel type for
9182 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9183 // original `option_anchors` feature, which should be rejected by A as it's not supported by
// LDK.
9185 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9186 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9187 10000000, 100000, 42, &config, 0, 42
9190 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9192 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9193 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9194 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9195 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// B (the "malicious" peer) overwrites the negotiated type with the legacy anchors
// type in its accept_channel message.
9198 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9199 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
// The outbound side (A) must reject the downgraded accept_channel.
9201 let res = channel_a.accept_channel(
9202 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9204 assert!(res.is_err());
9208 fn test_waiting_for_batch() {
9209 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9210 let logger = test_utils::TestLogger::new();
9211 let secp_ctx = Secp256k1::new();
9212 let seed = [42; 32];
9213 let network = Network::Testnet;
9214 let best_block = BestBlock::from_network(network);
9215 let chain_hash = ChainHash::using_genesis_block(network);
9216 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9218 let mut config = UserConfig::default();
9219 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9220 // channel in a batch before all channels are ready.
9221 config.channel_handshake_limits.trust_own_funding_0conf = true;
9223 // Create a channel from node a to node b that will be part of batch funding.
9224 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9225 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9230 &channelmanager::provided_init_features(&config),
9239 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9240 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9241 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9246 &channelmanager::provided_channel_type_features(&config),
9247 &channelmanager::provided_init_features(&config),
9253 true, // Allow node b to send a 0conf channel_ready.
9256 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9257 node_a_chan.accept_channel(
9258 &accept_channel_msg,
9259 &config.channel_handshake_limits,
9260 &channelmanager::provided_init_features(&config),
9263 // Fund the channel with a batch funding transaction.
9264 let output_script = node_a_chan.context.get_funding_redeemscript();
9265 let tx = Transaction {
9267 lock_time: PackedLockTime::ZERO,
9271 value: 10000000, script_pubkey: output_script.clone(),
9274 value: 10000000, script_pubkey: Builder::new().into_script(),
9277 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9278 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9283 ).map_err(|_| ()).unwrap();
9284 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9285 &funding_created_msg,
9289 ).map_err(|_| ()).unwrap();
9290 let node_b_updates = node_b_chan.monitor_updating_restored(
9298 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9299 // broadcasting the funding transaction until the batch is ready.
9300 let _ = node_a_chan.funding_signed(
9301 &funding_signed_msg,
9306 let node_a_updates = node_a_chan.monitor_updating_restored(
9313 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9314 // as the funding transaction depends on all channels in the batch becoming ready.
9315 assert!(node_a_updates.channel_ready.is_none());
9316 assert!(node_a_updates.funding_broadcastable.is_none());
9318 node_a_chan.context.channel_state,
9319 ChannelState::FundingSent as u32 |
9320 ChannelState::WaitingForBatch as u32,
9323 // It is possible to receive a 0conf channel_ready from the remote node.
9324 node_a_chan.channel_ready(
9325 &node_b_updates.channel_ready.unwrap(),
9333 node_a_chan.context.channel_state,
9334 ChannelState::FundingSent as u32 |
9335 ChannelState::WaitingForBatch as u32 |
9336 ChannelState::TheirChannelReady as u32,
9339 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9340 node_a_chan.set_batch_ready();
9342 node_a_chan.context.channel_state,
9343 ChannelState::FundingSent as u32 |
9344 ChannelState::TheirChannelReady as u32,
9346 assert!(node_a_chan.check_get_channel_ready(0).is_some());